Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig | 13
-rw-r--r--  lib/Kconfig.debug | 154
-rw-r--r--  lib/Kconfig.kasan | 112
-rw-r--r--  lib/Kconfig.ubsan | 14
-rw-r--r--  lib/Makefile | 15
-rw-r--r--  lib/assoc_array.c | 9
-rw-r--r--  lib/bitmap.c | 4
-rw-r--r--  lib/bsearch.c | 2
-rw-r--r--  lib/bust_spinlocks.c | 6
-rw-r--r--  lib/chacha.c (renamed from lib/chacha20.c) | 59
-rw-r--r--  lib/cordic.c | 23
-rw-r--r--  lib/cpumask.c | 6
-rw-r--r--  lib/crc32.c | 4
-rw-r--r--  lib/debugobjects.c | 8
-rw-r--r--  lib/devres.c | 4
-rw-r--r--  lib/div64.c | 4
-rw-r--r--  lib/dynamic_debug.c | 24
-rw-r--r--  lib/find_bit_benchmark.c | 11
-rw-r--r--  lib/flex_array.c | 398
-rw-r--r--  lib/fonts/Kconfig | 10
-rw-r--r--  lib/fonts/Makefile | 1
-rw-r--r--  lib/fonts/font_ter16x32.c | 2072
-rw-r--r--  lib/fonts/fonts.c | 4
-rw-r--r--  lib/gcd.c | 2
-rw-r--r--  lib/gen_crc64table.c | 2
-rw-r--r--  lib/genalloc.c | 25
-rw-r--r--  lib/generic-radix-tree.c | 217
-rw-r--r--  lib/int_sqrt.c | 2
-rw-r--r--  lib/iomap.c | 140
-rw-r--r--  lib/ioremap.c | 103
-rw-r--r--  lib/iov_iter.c | 102
-rw-r--r--  lib/irq_poll.c | 2
-rw-r--r--  lib/kobject.c | 4
-rw-r--r--  lib/kobject_uevent.c | 13
-rw-r--r--  lib/livepatch/Makefile | 15
-rw-r--r--  lib/livepatch/test_klp_atomic_replace.c | 57
-rw-r--r--  lib/livepatch/test_klp_callbacks_busy.c | 43
-rw-r--r--  lib/livepatch/test_klp_callbacks_demo.c | 121
-rw-r--r--  lib/livepatch/test_klp_callbacks_demo2.c | 93
-rw-r--r--  lib/livepatch/test_klp_callbacks_mod.c | 24
-rw-r--r--  lib/livepatch/test_klp_livepatch.c | 51
-rw-r--r--  lib/livepatch/test_klp_shadow_vars.c | 258
-rw-r--r--  lib/locking-selftest.c | 2
-rw-r--r--  lib/lzo/lzo1x_compress.c | 135
-rw-r--r--  lib/lzo/lzo1x_decompress_safe.c | 73
-rw-r--r--  lib/lzo/lzodefs.h | 21
-rw-r--r--  lib/objagg.c | 1056
-rw-r--r--  lib/percpu-refcount.c | 2
-rw-r--r--  lib/raid6/Makefile | 22
-rw-r--r--  lib/raid6/algos.c | 81
-rw-r--r--  lib/raid6/neon.uc | 5
-rw-r--r--  lib/raid6/recov_neon_inner.c | 19
-rw-r--r--  lib/raid6/test/Makefile | 3
-rw-r--r--  lib/refcount.c | 18
-rw-r--r--  lib/rhashtable.c | 18
-rw-r--r--  lib/sbitmap.c | 174
-rw-r--r--  lib/scatterlist.c | 28
-rw-r--r--  lib/seq_buf.c | 8
-rw-r--r--  lib/show_mem.c | 5
-rw-r--r--  lib/smp_processor_id.c | 7
-rw-r--r--  lib/string.c | 20
-rw-r--r--  lib/strncpy_from_user.c | 9
-rw-r--r--  lib/strnlen_user.c | 9
-rw-r--r--  lib/syscall.c | 57
-rw-r--r--  lib/test_bpf.c | 16
-rw-r--r--  lib/test_debug_virtual.c | 1
-rw-r--r--  lib/test_firmware.c | 9
-rw-r--r--  lib/test_kasan.c | 24
-rw-r--r--  lib/test_kmod.c | 2
-rw-r--r--  lib/test_objagg.c | 1021
-rw-r--r--  lib/test_printf.c | 61
-rw-r--r--  lib/test_rhashtable.c | 68
-rw-r--r--  lib/test_stackinit.c | 378
-rw-r--r--  lib/test_ubsan.c | 11
-rw-r--r--  lib/test_vmalloc.c | 551
-rw-r--r--  lib/test_xarray.c | 333
-rw-r--r--  lib/usercopy.c | 4
-rw-r--r--  lib/vsprintf.c | 106
-rw-r--r--  lib/xarray.c | 205
79 files changed, 7735 insertions, 1058 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index a9965f4af4dd..a9e56539bd11 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -10,6 +10,14 @@ menu "Library routines"
config RAID6_PQ
tristate
+config RAID6_PQ_BENCHMARK
+ bool "Automatically choose fastest RAID6 PQ functions"
+ depends on RAID6_PQ
+ default y
+ help
+ Benchmark all available RAID6 PQ functions on init and choose the
+ fastest one.
+
config BITREVERSE
tristate
@@ -577,7 +585,7 @@ config SG_POOL
# sg chaining option
#
-config ARCH_HAS_SG_CHAIN
+config ARCH_NO_SG_CHAIN
def_bool n
config ARCH_HAS_PMEM_API
@@ -624,3 +632,6 @@ config GENERIC_LIB_CMPDI2
config GENERIC_LIB_UCMPDI2
bool
+
+config OBJAGG
+ tristate "objagg" if COMPILE_TEST
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1af29b8224fd..d5a4a4036d2f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -17,6 +17,23 @@ config PRINTK_TIME
The behavior is also controlled by the kernel command line
parameter printk.time=1. See Documentation/admin-guide/kernel-parameters.rst
+config PRINTK_CALLER
+ bool "Show caller information on printks"
+ depends on PRINTK
+ help
+ Selecting this option causes printk() to add a caller "thread id" (if
+ in task context) or a caller "processor id" (if not in task context)
+ to every message.
+
+ This option is intended for environments where multiple threads
+ concurrently call printk() many times, making the log difficult to
+ interpret without knowing where each line (or an individual line
+ that was split into multiple lines due to a race) came from.
+
+ Since toggling after boot makes the code racy, there is currently
+ no option to enable or disable it via a kernel command line
+ parameter or a sysfs interface.
+
config CONSOLE_LOGLEVEL_DEFAULT
int "Default console loglevel (1-15)"
range 1 15
@@ -179,6 +196,7 @@ config DEBUG_INFO_REDUCED
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
depends on DEBUG_INFO
+ depends on $(cc-option,-gsplit-dwarf)
help
Generate debug info into separate .dwo files. This significantly
reduces the build directory size for builds with DEBUG_INFO,
@@ -194,6 +212,7 @@ config DEBUG_INFO_SPLIT
config DEBUG_INFO_DWARF4
bool "Generate dwarf4 debuginfo"
depends on DEBUG_INFO
+ depends on $(cc-option,-gdwarf-4)
help
Generate dwarf4 debug info. This requires recent versions
of gcc and gdb. It makes the debug information larger.
@@ -222,7 +241,6 @@ config ENABLE_MUST_CHECK
config FRAME_WARN
int "Warn for stack frames larger than (needs gcc 4.4)"
range 0 8192
- default 3072 if KASAN_EXTRA
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
default 1280 if (!64BIT && PARISC)
default 1024 if (!64BIT && !PARISC)
@@ -266,23 +284,6 @@ config UNUSED_SYMBOLS
you really need it, and what the merge plan to the mainline kernel for
your module is.
-config PAGE_OWNER
- bool "Track page owner"
- depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
- select DEBUG_FS
- select STACKTRACE
- select STACKDEPOT
- select PAGE_EXTENSION
- help
- This keeps track of what call chain is the owner of a page, may
- help to find bare alloc_page(s) leaks. Even if you include this
- feature on your build, it is disabled in default. You should pass
- "page_owner=on" to boot parameter in order to enable it. Eats
- a fair amount of memory if enabled. See tools/vm/page_owner_sort.c
- for user-space helper.
-
- If unsure, say N.
-
config DEBUG_FS
bool "Debug Filesystem"
help
@@ -439,7 +440,7 @@ config DEBUG_KERNEL
menu "Memory Debugging"
-source mm/Kconfig.debug
+source "mm/Kconfig.debug"
config DEBUG_OBJECTS
bool "Debug object operations"
@@ -593,6 +594,21 @@ config DEBUG_KMEMLEAK_DEFAULT_OFF
Say Y here to disable kmemleak by default. It can then be enabled
on the command line via kmemleak=on.
+config DEBUG_KMEMLEAK_AUTO_SCAN
+ bool "Enable kmemleak auto scan thread on boot up"
+ default y
+ depends on DEBUG_KMEMLEAK
+ help
+ Depending on the CPU, the kmemleak scan may be CPU intensive and can
+ stall user tasks at times. This option enables/disables the automatic
+ kmemleak scan at boot up.
+
+ Say N here to disable the kmemleak auto scan thread and stop
+ automatic scanning. Disabling this option also disables automatic
+ reporting of memory leaks.
+
+ If unsure, say Y.
+
config DEBUG_STACK_USAGE
bool "Stack utilization instrumentation"
depends on DEBUG_KERNEL && !IA64
@@ -737,9 +753,9 @@ endmenu # "Memory Debugging"
config ARCH_HAS_KCOV
bool
help
- KCOV does not have any arch-specific code, but currently it is enabled
- only for x86_64. KCOV requires testing on other archs, and most likely
- disabling of instrumentation for some early boot code.
+ An architecture should select this when it can successfully
+ build and run with CONFIG_KCOV. This typically requires
+ disabling instrumentation for some early boot code.
config CC_HAS_SANCOV_TRACE_PC
def_bool $(cc-option,-fsanitize-coverage=trace-pc)
@@ -1609,7 +1625,7 @@ config LATENCYTOP
Enable this option if you want to use the LatencyTOP tool
to find out which userspace is blocking on what kernel operations.
-source kernel/trace/Kconfig
+source "kernel/trace/Kconfig"
config PROVIDE_OHCI1394_DMA_INIT
bool "Remote debugging over FireWire early on boot"
@@ -1640,42 +1656,6 @@ config PROVIDE_OHCI1394_DMA_INIT
See Documentation/debugging-via-ohci1394.txt for more information.
-config DMA_API_DEBUG
- bool "Enable debugging of DMA-API usage"
- select NEED_DMA_MAP_STATE
- help
- Enable this option to debug the use of the DMA API by device drivers.
- With this option you will be able to detect common bugs in device
- drivers like double-freeing of DMA mappings or freeing mappings that
- were never allocated.
-
- This also attempts to catch cases where a page owned by DMA is
- accessed by the cpu in a way that could cause data corruption. For
- example, this enables cow_user_page() to check that the source page is
- not undergoing DMA.
-
- This option causes a performance degradation. Use only if you want to
- debug device drivers and dma interactions.
-
- If unsure, say N.
-
-config DMA_API_DEBUG_SG
- bool "Debug DMA scatter-gather usage"
- default y
- depends on DMA_API_DEBUG
- help
- Perform extra checking that callers of dma_map_sg() have respected the
- appropriate segment length/boundary limits for the given device when
- preparing DMA scatterlists.
-
- This is particularly likely to have been overlooked in cases where the
- dma_map_sg() API is used for general bulk mapping of pages rather than
- preparing literal scatter-gather descriptors, where there is a risk of
- unexpected behaviour from DMA API implementations if the scatterlist
- is technically out-of-spec.
-
- If unsure, say N.
-
menuconfig RUNTIME_TESTING_MENU
bool "Runtime Testing"
def_bool y
@@ -1685,7 +1665,6 @@ if RUNTIME_TESTING_MENU
config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
depends on DEBUG_FS
- depends on BLOCK
help
This module enables testing of the different dumping mechanisms by
inducing system failures at predefined crash points.
@@ -1861,6 +1840,19 @@ config TEST_LKM
If unsure, say N.
+config TEST_VMALLOC
+ tristate "Test module for stress/performance analysis of vmalloc allocator"
+ default n
+ depends on MMU
+ depends on m
+ help
+ This builds the "test_vmalloc" module that should be used for
+ stress and performance analysis, so that any new change to the
+ vmalloc subsystem can be evaluated from a performance and
+ stability point of view.
+
+ If unsure, say N.
+
config TEST_USER_COPY
tristate "Test user/kernel boundary protections"
depends on m
@@ -1937,6 +1929,7 @@ config TEST_KMOD
depends on m
depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
depends on NETDEVICES && NET_CORE && INET # for TUN
+ depends on BLOCK
select TEST_LKM
select XFS_FS
select TUN
@@ -1976,6 +1969,47 @@ config TEST_MEMCAT_P
If unsure, say N.
+config TEST_LIVEPATCH
+ tristate "Test livepatching"
+ default n
+ depends on DYNAMIC_DEBUG
+ depends on LIVEPATCH
+ depends on m
+ help
+ Test kernel livepatching features for correctness. The tests will
+ load test modules that will be livepatched in various scenarios.
+
+ To run all the livepatching tests:
+
+ make -C tools/testing/selftests TARGETS=livepatch run_tests
+
+ Alternatively, individual tests may be invoked:
+
+ tools/testing/selftests/livepatch/test-callbacks.sh
+ tools/testing/selftests/livepatch/test-livepatch.sh
+ tools/testing/selftests/livepatch/test-shadow-vars.sh
+
+ If unsure, say N.
+
+config TEST_OBJAGG
+ tristate "Perform selftest on object aggregation manager"
+ default n
+ depends on OBJAGG
+ help
+ Enable this option to test object aggregation manager on boot
+ (or module load).
+
+
+config TEST_STACKINIT
+ tristate "Test level of stack variable initialization"
+ help
+ Test if the kernel is zero-initializing stack variables and
+ padding. Coverage is controlled by compiler flags,
+ CONFIG_GCC_PLUGIN_STRUCTLEAK, CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF,
+ or CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL.
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config MEMTEST
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index d0bad1bd9a2b..9950b660e62d 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -1,36 +1,82 @@
+# This config refers to the generic KASAN mode.
config HAVE_ARCH_KASAN
bool
-if HAVE_ARCH_KASAN
+config HAVE_ARCH_KASAN_SW_TAGS
+ bool
+
+config CC_HAS_KASAN_GENERIC
+ def_bool $(cc-option, -fsanitize=kernel-address)
+
+config CC_HAS_KASAN_SW_TAGS
+ def_bool $(cc-option, -fsanitize=kernel-hwaddress)
config KASAN
- bool "KASan: runtime memory debugger"
+ bool "KASAN: runtime memory debugger"
+ depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
+ (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)
+ depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+ help
+ Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
+ designed to find out-of-bounds accesses and use-after-free bugs.
+ See Documentation/dev-tools/kasan.rst for details.
+
+choice
+ prompt "KASAN mode"
+ depends on KASAN
+ default KASAN_GENERIC
+ help
+ KASAN has two modes: generic KASAN (similar to userspace ASan,
+ x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC) and
+ software tag-based KASAN (a version based on software memory
+ tagging, arm64 only, similar to userspace HWASan, enabled with
+ CONFIG_KASAN_SW_TAGS).
+ Both generic and tag-based KASAN are strictly debugging features.
+
+config KASAN_GENERIC
+ bool "Generic mode"
+ depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
select STACKDEPOT
help
- Enables kernel address sanitizer - runtime memory debugger,
- designed to find out-of-bounds accesses and use-after-free bugs.
- This is strictly a debugging feature and it requires a gcc version
- of 4.9.2 or later. Detection of out of bounds accesses to stack or
- global variables requires gcc 5.0 or later.
- This feature consumes about 1/8 of available memory and brings about
- ~x3 performance slowdown.
+ Enables generic KASAN mode.
+ Supported in both GCC and Clang. With GCC it requires version 4.9.2
+ or later for basic support and version 5.0 or later for detection of
+ out-of-bounds accesses for stack and global variables and for inline
+ instrumentation mode (CONFIG_KASAN_INLINE). With Clang it requires
+ version 3.7.0 or later and it doesn't support detection of
+ out-of-bounds accesses for global variables yet.
+ This mode consumes about 1/8th of available memory at kernel start
+ and introduces an overhead of ~x1.5 for the rest of the allocations.
+ The performance slowdown is ~x3.
For better error detection enable CONFIG_STACKTRACE.
- Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB
+ Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot).
-config KASAN_EXTRA
- bool "KAsan: extra checks"
- depends on KASAN && DEBUG_KERNEL && !COMPILE_TEST
+config KASAN_SW_TAGS
+ bool "Software tag-based mode"
+ depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
+ depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+ select SLUB_DEBUG if SLUB
+ select CONSTRUCTORS
+ select STACKDEPOT
help
- This enables further checks in the kernel address sanitizer, for now
- it only includes the address-use-after-scope check that can lead
- to excessive kernel stack usage, frame size warnings and longer
- compile time.
- https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 has more
+ Enables software tag-based KASAN mode.
+ This mode requires Top Byte Ignore support by the CPU and therefore
+ is only supported for arm64.
+ This mode requires Clang version 7.0.0 or later.
+ This mode consumes about 1/16th of available memory at kernel start
+ and introduces an overhead of ~20% for the rest of the allocations.
+ This mode may potentially introduce problems relating to pointer
+ casting and comparison, as it embeds tags into the top byte of each
+ pointer.
+ For better error detection enable CONFIG_STACKTRACE.
+ Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB
+ (the resulting kernel does not boot).
+endchoice
choice
prompt "Instrumentation type"
@@ -53,10 +99,32 @@ config KASAN_INLINE
memory accesses. This is faster than outline (in some workloads
it gives about x2 boost over outline instrumentation), but
make kernel's .text size much bigger.
- This requires a gcc version of 5.0 or later.
+ For CONFIG_KASAN_GENERIC this requires GCC 5.0 or later.
endchoice
+config KASAN_STACK_ENABLE
+ bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
+ default !(CLANG_VERSION < 90000)
+ depends on KASAN
+ help
+ The LLVM stack address sanitizer has a known problem that
+ causes excessive stack usage in a lot of functions, see
+ https://bugs.llvm.org/show_bug.cgi?id=38809
+ Disabling asan-stack makes it safe to run kernels built
+ with clang-8 with KASAN enabled, though it loses some of
+ the functionality.
+ This feature is always disabled when compile-testing with clang-8
+ or earlier to avoid cluttering the output with stack overflow
+ warnings, but clang-8 users can still enable it for builds without
+ CONFIG_COMPILE_TEST. On gcc and later clang versions it is
+ assumed to always be safe to use and is enabled by default.
+
+config KASAN_STACK
+ int
+ default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
+ default 0
+
config KASAN_S390_4_LEVEL_PAGING
bool "KASan: use 4-level paging"
depends on KASAN && S390
@@ -67,11 +135,9 @@ config KASAN_S390_4_LEVEL_PAGING
4-level paging instead.
config TEST_KASAN
- tristate "Module for testing kasan for bug detection"
+ tristate "Module for testing KASAN for bug detection"
depends on m && KASAN
help
This is a test module doing various nasty things like
out of bounds accesses, use after free. It is useful for testing
- kernel debugging features like kernel address sanitizer.
-
-endif
+ kernel debugging features like KASAN.
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 98fa559ebd80..a2ae4a8e4fa6 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -27,15 +27,19 @@ config UBSAN_SANITIZE_ALL
Enabling this option will get kernel image size increased
significantly.
-config UBSAN_ALIGNMENT
- bool "Enable checking of pointers alignment"
+config UBSAN_NO_ALIGNMENT
+ bool "Disable checking of pointers alignment"
depends on UBSAN
- default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS
+ default y if HAVE_EFFICIENT_UNALIGNED_ACCESS
help
- This option enables detection of unaligned memory accesses.
- Enabling this option on architectures that support unaligned
+ This option disables the check of unaligned memory accesses.
+ This option should be used when building allmodconfig.
+ Disabling this option on architectures that support unaligned
accesses may produce a lot of false positives.
+config UBSAN_ALIGNMENT
+ def_bool !UBSAN_NO_ALIGNMENT
+
config TEST_UBSAN
tristate "Module for testing for undefined behavior detection"
depends on m && UBSAN
diff --git a/lib/Makefile b/lib/Makefile
index db06d1237898..3b08673e8881 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -20,7 +20,7 @@ KCOV_INSTRUMENT_dynamic_debug.o := n
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
idr.o int_sqrt.o extable.o \
- sha1.o chacha20.o irq_regs.o argv_split.o \
+ sha1.o chacha.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
@@ -35,10 +35,11 @@ obj-y += lockref.o
obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
- gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
+ gcd.o lcm.o list_sort.o uuid.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o rhashtable.o reciprocal_div.o \
- once.o refcount.o usercopy.o errseq.o bucket_locks.o
+ once.o refcount.o usercopy.o errseq.o bucket_locks.o \
+ generic-radix-tree.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
@@ -60,6 +61,7 @@ UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
obj-$(CONFIG_TEST_LKM) += test_module.o
+obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o
obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o
obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
obj-$(CONFIG_TEST_SORT) += test_sort.o
@@ -75,6 +77,10 @@ obj-$(CONFIG_TEST_PARMAN) += test_parman.o
obj-$(CONFIG_TEST_KMOD) += test_kmod.o
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
+obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
+obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o
+
+obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -207,7 +213,7 @@ KCOV_INSTRUMENT_stackdepot.o := n
libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
fdt_empty_tree.o
$(foreach file, $(libfdt_files), \
- $(eval CFLAGS_$(file) = -I$(src)/../scripts/dtc/libfdt))
+ $(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
lib-$(CONFIG_LIBFDT) += $(libfdt_files)
obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
@@ -274,3 +280,4 @@ obj-$(CONFIG_GENERIC_LIB_LSHRDI3) += lshrdi3.o
obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
+obj-$(CONFIG_OBJAGG) += objagg.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index c6659cb37033..edc3c14af41d 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -768,9 +768,11 @@ all_leaves_cluster_together:
new_s0->index_key[i] =
ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
- blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
- pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
- new_s0->index_key[keylen - 1] &= ~blank;
+ if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) {
+ blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
+ pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
+ new_s0->index_key[keylen - 1] &= ~blank;
+ }
/* This now reduces to a node splitting exercise for which we'll need
* to regenerate the disparity table.
@@ -1115,6 +1117,7 @@ struct assoc_array_edit *assoc_array_delete(struct assoc_array *array,
index_key))
goto found_leaf;
}
+ /* fall through */
case assoc_array_walk_tree_empty:
case assoc_array_walk_found_wrong_shortcut:
default:
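Editor's note on the first assoc_array.c hunk above: when "level" is an exact multiple of the key chunk size, the shift amount (level & ASSOC_ARRAY_KEY_CHUNK_MASK) is zero, so ULONG_MAX << 0 is all ones and the old unguarded code would have cleared the entire final index_key word. Below is a minimal userspace sketch of that edge case — not kernel code; the helper name blank_top() and the chunk constants are made up for illustration.

#include <stdio.h>
#include <limits.h>

#define KEY_CHUNK_SIZE (sizeof(unsigned long) * 8)	/* bits per key chunk */
#define KEY_CHUNK_MASK (KEY_CHUNK_SIZE - 1)

static unsigned long blank_top(unsigned long last_chunk, int level)
{
	/* Guarded form, mirroring the patched code: only blank out the
	 * bits above "level" when there actually are any in this chunk. */
	if (level & KEY_CHUNK_MASK) {
		unsigned long blank = ULONG_MAX << (level & KEY_CHUNK_MASK);
		last_chunk &= ~blank;
	}
	return last_chunk;
}

int main(void)
{
	unsigned long chunk = 0xdeadbeefUL;

	/* level not a multiple of the chunk size: upper bits are cleared */
	printf("level=8:  %#lx\n", blank_top(chunk, 8));
	/* level a multiple of the chunk size: the chunk must stay intact;
	 * the unguarded ULONG_MAX << 0 would have wiped it to zero */
	printf("level=%zu: %#lx\n", KEY_CHUNK_SIZE,
	       blank_top(chunk, (int)KEY_CHUNK_SIZE));
	return 0;
}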
diff --git a/lib/bitmap.c b/lib/bitmap.c
index eead55aa7170..98872e9025da 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -443,7 +443,7 @@ int bitmap_parse_user(const char __user *ubuf,
unsigned int ulen, unsigned long *maskp,
int nmaskbits)
{
- if (!access_ok(VERIFY_READ, ubuf, ulen))
+ if (!access_ok(ubuf, ulen))
return -EFAULT;
return __bitmap_parse((const char __force *)ubuf,
ulen, 1, maskp, nmaskbits);
@@ -641,7 +641,7 @@ int bitmap_parselist_user(const char __user *ubuf,
unsigned int ulen, unsigned long *maskp,
int nmaskbits)
{
- if (!access_ok(VERIFY_READ, ubuf, ulen))
+ if (!access_ok(ubuf, ulen))
return -EFAULT;
return __bitmap_parselist((const char __force *)ubuf,
ulen, 1, maskp, nmaskbits);
diff --git a/lib/bsearch.c b/lib/bsearch.c
index 18b445b010c3..82512fe7b33c 100644
--- a/lib/bsearch.c
+++ b/lib/bsearch.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/bsearch.h>
+#include <linux/kprobes.h>
/*
* bsearch - binary search an array of elements
@@ -53,3 +54,4 @@ void *bsearch(const void *key, const void *base, size_t num, size_t size,
return NULL;
}
EXPORT_SYMBOL(bsearch);
+NOKPROBE_SYMBOL(bsearch);
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index ab719495e2cb..8be59f84eaea 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -2,7 +2,8 @@
/*
* lib/bust_spinlocks.c
*
- * Provides a minimal bust_spinlocks for architectures which don't have one of their own.
+ * Provides a minimal bust_spinlocks for architectures which don't
+ * have one of their own.
*
* bust_spinlocks() clears any spinlocks which would prevent oops, die(), BUG()
* and panic() information from reaching the user.
@@ -16,8 +17,7 @@
#include <linux/vt_kern.h>
#include <linux/console.h>
-
-void __attribute__((weak)) bust_spinlocks(int yes)
+void bust_spinlocks(int yes)
{
if (yes) {
++oops_in_progress;
diff --git a/lib/chacha20.c b/lib/chacha.c
index d907fec6a9ed..a46d2832dbab 100644
--- a/lib/chacha20.c
+++ b/lib/chacha.c
@@ -1,5 +1,5 @@
/*
- * ChaCha20 256-bit cipher algorithm, RFC7539
+ * The "hash function" used as the core of the ChaCha stream cipher (RFC7539)
*
* Copyright (C) 2015 Martin Willi
*
@@ -14,17 +14,16 @@
#include <linux/bitops.h>
#include <linux/cryptohash.h>
#include <asm/unaligned.h>
-#include <crypto/chacha20.h>
+#include <crypto/chacha.h>
-void chacha20_block(u32 *state, u8 *stream)
+static void chacha_permute(u32 *x, int nrounds)
{
- u32 x[16];
int i;
- for (i = 0; i < ARRAY_SIZE(x); i++)
- x[i] = state[i];
+ /* whitelist the allowed round counts */
+ WARN_ON_ONCE(nrounds != 20 && nrounds != 12);
- for (i = 0; i < 20; i += 2) {
+ for (i = 0; i < nrounds; i += 2) {
x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16);
x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16);
x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16);
@@ -65,10 +64,54 @@ void chacha20_block(u32 *state, u8 *stream)
x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7);
x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7);
}
+}
+
+/**
+ * chacha_block - generate one keystream block and increment block counter
+ * @state: input state matrix (16 32-bit words)
+ * @stream: output keystream block (64 bytes)
+ * @nrounds: number of rounds (20 or 12; 20 is recommended)
+ *
+ * This is the ChaCha core, a function from 64-byte strings to 64-byte strings.
+ * The caller has already converted the endianness of the input. This function
+ * also handles incrementing the block counter in the input matrix.
+ */
+void chacha_block(u32 *state, u8 *stream, int nrounds)
+{
+ u32 x[16];
+ int i;
+
+ memcpy(x, state, 64);
+
+ chacha_permute(x, nrounds);
for (i = 0; i < ARRAY_SIZE(x); i++)
put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]);
state[12]++;
}
-EXPORT_SYMBOL(chacha20_block);
+EXPORT_SYMBOL(chacha_block);
+
+/**
+ * hchacha_block - abbreviated ChaCha core, for XChaCha
+ * @in: input state matrix (16 32-bit words)
+ * @out: output (8 32-bit words)
+ * @nrounds: number of rounds (20 or 12; 20 is recommended)
+ *
+ * HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step
+ * towards XChaCha (see https://cr.yp.to/snuffle/xsalsa-20081128.pdf). HChaCha
+ * skips the final addition of the initial state, and outputs only certain words
+ * of the state. It should not be used for streaming directly.
+ */
+void hchacha_block(const u32 *in, u32 *out, int nrounds)
+{
+ u32 x[16];
+
+ memcpy(x, in, 64);
+
+ chacha_permute(x, nrounds);
+
+ memcpy(&out[0], &x[0], 16);
+ memcpy(&out[4], &x[12], 16);
+}
+EXPORT_SYMBOL(hchacha_block);
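Editor's note for readers following the chacha.c rework above: the hunks only show parts of the round function, so here is a compact, self-contained userspace restatement of the same construction (standard C, not the kernel's code): chacha_permute() as nrounds/2 double rounds of quarter-rounds, and a chacha_block() that permutes a copy of the state, adds the input words back in, serializes little-endian, and bumps the block counter in word 12.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t rol32(uint32_t v, int n) { return (v << n) | (v >> (32 - n)); }

/* One ChaCha quarter round on four state words. */
static void qround(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
	*a += *b; *d = rol32(*d ^ *a, 16);
	*c += *d; *b = rol32(*b ^ *c, 12);
	*a += *b; *d = rol32(*d ^ *a, 8);
	*c += *d; *b = rol32(*b ^ *c, 7);
}

/* Mirror of chacha_permute(): nrounds/2 double rounds over a 16-word state
 * (the kernel version additionally warns unless nrounds is 20 or 12). */
static void chacha_permute(uint32_t x[16], int nrounds)
{
	for (int i = 0; i < nrounds; i += 2) {
		/* column round */
		qround(&x[0], &x[4], &x[8],  &x[12]);
		qround(&x[1], &x[5], &x[9],  &x[13]);
		qround(&x[2], &x[6], &x[10], &x[14]);
		qround(&x[3], &x[7], &x[11], &x[15]);
		/* diagonal round */
		qround(&x[0], &x[5], &x[10], &x[15]);
		qround(&x[1], &x[6], &x[11], &x[12]);
		qround(&x[2], &x[7], &x[8],  &x[13]);
		qround(&x[3], &x[4], &x[9],  &x[14]);
	}
}

/* Like the kernel's chacha_block(): permute a copy of the state, add the
 * input words back in, emit 64 bytes little-endian, bump the counter. */
static void chacha_block(uint32_t state[16], uint8_t stream[64], int nrounds)
{
	uint32_t x[16];

	memcpy(x, state, sizeof(x));
	chacha_permute(x, nrounds);
	for (int i = 0; i < 16; i++) {
		uint32_t v = x[i] + state[i];
		stream[4 * i + 0] = v;
		stream[4 * i + 1] = v >> 8;
		stream[4 * i + 2] = v >> 16;
		stream[4 * i + 3] = v >> 24;
	}
	state[12]++;	/* block counter */
}

int main(void)
{
	uint32_t state[16] = { 0 };	/* constants/key/counter/nonce go here */
	uint8_t stream[64];

	chacha_block(state, stream, 20);
	printf("first keystream byte: %02x (counter now %u)\n",
	       stream[0], (unsigned)state[12]);
	return 0;
}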
diff --git a/lib/cordic.c b/lib/cordic.c
index 6cf477839ebd..8ef27c12956f 100644
--- a/lib/cordic.c
+++ b/lib/cordic.c
@@ -16,15 +16,6 @@
#include <linux/module.h>
#include <linux/cordic.h>
-#define CORDIC_ANGLE_GEN 39797
-#define CORDIC_PRECISION_SHIFT 16
-#define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2)
-
-#define FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT))
-#define FLOAT(X) (((X) >= 0) \
- ? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
- : -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))
-
static const s32 arctan_table[] = {
2949120,
1740967,
@@ -64,16 +55,16 @@ struct cordic_iq cordic_calc_iq(s32 theta)
coord.q = 0;
angle = 0;
- theta = FIXED(theta);
+ theta = CORDIC_FIXED(theta);
signtheta = (theta < 0) ? -1 : 1;
- theta = ((theta + FIXED(180) * signtheta) % FIXED(360)) -
- FIXED(180) * signtheta;
+ theta = ((theta + CORDIC_FIXED(180) * signtheta) % CORDIC_FIXED(360)) -
+ CORDIC_FIXED(180) * signtheta;
- if (FLOAT(theta) > 90) {
- theta -= FIXED(180);
+ if (CORDIC_FLOAT(theta) > 90) {
+ theta -= CORDIC_FIXED(180);
signx = -1;
- } else if (FLOAT(theta) < -90) {
- theta += FIXED(180);
+ } else if (CORDIC_FLOAT(theta) < -90) {
+ theta += CORDIC_FIXED(180);
signx = -1;
}
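Editor's note on the cordic.c hunk above: it drops the local FIXED()/FLOAT() macros in favour of CORDIC_FIXED()/CORDIC_FLOAT(), presumably now provided by include/linux/cordic.h (the header change is not shown in this section). The deleted definitions reveal the underlying Q16 fixed-point scheme; the small userspace sketch below reproduces those conversions for illustration only, with macro names local to the example.

#include <stdio.h>
#include <stdint.h>

#define PRECISION_SHIFT 16
#define FIXED(X)  ((int32_t)((X) << PRECISION_SHIFT))
#define FLOAT(X)  (((X) >= 0) \
	? ((((X) >> (PRECISION_SHIFT - 1)) + 1) >> 1) \
	: -((((-(X)) >> (PRECISION_SHIFT - 1)) + 1) >> 1))

int main(void)
{
	int32_t theta = FIXED(180);	/* 180 degrees scaled by 2^16 */

	printf("FIXED(180) = %d\n", theta);
	printf("FLOAT(FIXED(180)) = %d\n", FLOAT(theta));
	/* FLOAT() rounds to nearest: 90.5 in Q16 comes back as 91 */
	printf("FLOAT(FIXED(90) + FIXED(1)/2) = %d\n",
	       FLOAT(FIXED(90) + FIXED(1) / 2));
	return 0;
}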
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 8d666ab84b5c..0cb672eb107c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -5,6 +5,7 @@
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
+#include <linux/numa.h>
/**
* cpumask_next - get the next cpu in a cpumask
@@ -164,6 +165,9 @@ EXPORT_SYMBOL(zalloc_cpumask_var);
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
+ if (!*mask)
+ panic("%s: Failed to allocate %u bytes\n", __func__,
+ cpumask_size());
}
/**
@@ -206,7 +210,7 @@ unsigned int cpumask_local_spread(unsigned int i, int node)
/* Wrap: we always want a cpu. */
i %= num_online_cpus();
- if (node == -1) {
+ if (node == NUMA_NO_NODE) {
for_each_cpu(cpu, cpu_online_mask)
if (i-- == 0)
return cpu;
diff --git a/lib/crc32.c b/lib/crc32.c
index 45b1d67a1767..4a20455d1f61 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -206,8 +206,8 @@ u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
EXPORT_SYMBOL(crc32_le);
EXPORT_SYMBOL(__crc32c_le);
-u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
-u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
+u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
+u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
/*
* This multiplies the polynomials x and y modulo the given modulus.
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 14afeeb7d6ef..55437fd5128b 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -1131,11 +1131,10 @@ static int __init debug_objects_replace_static_objects(void)
}
/*
- * When debug_objects_mem_init() is called we know that only
- * one CPU is up, so disabling interrupts is enough
- * protection. This avoids the lockdep hell of lock ordering.
+ * debug_objects_mem_init() is now called early, while only one CPU is
+ * up and interrupts are disabled, so it is safe to replace the
+ * active object references.
*/
- local_irq_disable();
/* Remove the statically allocated objects from the pool */
hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
@@ -1156,7 +1155,6 @@ static int __init debug_objects_replace_static_objects(void)
cnt++;
}
}
- local_irq_enable();
pr_debug("%d of %d active objects replaced\n",
cnt, obj_pool_used);
diff --git a/lib/devres.c b/lib/devres.c
index faccf1a037d0..69bed2f38306 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -134,7 +134,6 @@ EXPORT_SYMBOL(devm_iounmap);
void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
{
resource_size_t size;
- const char *name;
void __iomem *dest_ptr;
BUG_ON(!dev);
@@ -145,9 +144,8 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
}
size = resource_size(res);
- name = res->name ?: dev_name(dev);
- if (!devm_request_mem_region(dev, res->start, size, name)) {
+ if (!devm_request_mem_region(dev, res->start, size, dev_name(dev))) {
dev_err(dev, "can't request region for resource %pR\n", res);
return IOMEM_ERR_PTR(-EBUSY);
}
diff --git a/lib/div64.c b/lib/div64.c
index 01c8602bb6ff..ee146bb4c558 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -109,7 +109,7 @@ u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
quot = div_u64_rem(dividend, divisor, &rem32);
*remainder = rem32;
} else {
- int n = 1 + fls(high);
+ int n = fls(high);
quot = div_u64(dividend >> n, divisor >> n);
if (quot != 0)
@@ -147,7 +147,7 @@ u64 div64_u64(u64 dividend, u64 divisor)
if (high == 0) {
quot = div_u64(dividend, divisor);
} else {
- int n = 1 + fls(high);
+ int n = fls(high);
quot = div_u64(dividend >> n, divisor >> n);
if (quot != 0)
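Editor's note on the two div64.c hunks above: both normalize a divisor whose upper 32 bits are non-zero by shifting dividend and divisor right by n before using a 32-bit-divisor division. Since fls() returns the 1-based position of the highest set bit, shifting by fls(high) already brings the divisor below 2^32; the previous 1 + fls(high) shifted one bit further than necessary and discarded an extra bit of divisor precision. A small userspace check of that shift arithmetic follows (fls32() here stands in for the kernel's fls()).

#include <stdint.h>
#include <stdio.h>

/* 1-based "find last set", like the kernel's fls(). */
static int fls32(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	uint64_t divisor = 0x1ffffffffULL;	/* upper 32 bits = 0x1 */
	uint32_t high    = divisor >> 32;
	int n            = fls32(high);		/* patched:  n = 1 */
	int n_old        = 1 + fls32(high);	/* previous: n = 2 */

	/* Shifting by fls(high) is already enough for a 32-bit divisor;
	 * the extra "+ 1" just threw away one more bit. */
	printf("n=%d     -> divisor >> n     = %#llx\n",
	       n, (unsigned long long)(divisor >> n));
	printf("n_old=%d -> divisor >> n_old = %#llx\n",
	       n_old, (unsigned long long)(divisor >> n_old));
	return 0;
}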
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c7c96bc7654a..7bdf98c37e91 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -188,7 +188,7 @@ static int ddebug_change(const struct ddebug_query *query,
newflags = (dp->flags & mask) | flags;
if (newflags == dp->flags)
continue;
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
if (dp->flags & _DPRINTK_FLAGS_PRINT) {
if (!(flags & _DPRINTK_FLAGS_PRINT))
static_branch_disable(&dp->key.dd_key_true);
@@ -847,17 +847,19 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
const char *name)
{
struct ddebug_table *dt;
- const char *new_name;
dt = kzalloc(sizeof(*dt), GFP_KERNEL);
- if (dt == NULL)
- return -ENOMEM;
- new_name = kstrdup_const(name, GFP_KERNEL);
- if (new_name == NULL) {
- kfree(dt);
+ if (dt == NULL) {
+ pr_err("error adding module: %s\n", name);
return -ENOMEM;
}
- dt->mod_name = new_name;
+ /*
+ * For built-in modules, name lives in .rodata and is
+ * immortal. For loaded modules, name points at the name[]
+ * member of struct module, which lives at least as long as
+ * this struct ddebug_table.
+ */
+ dt->mod_name = name;
dt->num_ddebugs = n;
dt->ddebugs = tab;
@@ -868,7 +870,6 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
vpr_info("%u debug prints in module %s\n", n, dt->mod_name);
return 0;
}
-EXPORT_SYMBOL_GPL(ddebug_add_module);
/* helper for ddebug_dyndbg_(boot|module)_param_cb */
static int ddebug_dyndbg_param_cb(char *param, char *val,
@@ -913,7 +914,6 @@ int ddebug_dyndbg_module_param_cb(char *param, char *val, const char *module)
static void ddebug_table_free(struct ddebug_table *dt)
{
list_del_init(&dt->link);
- kfree_const(dt->mod_name);
kfree(dt);
}
@@ -930,15 +930,15 @@ int ddebug_remove_module(const char *mod_name)
mutex_lock(&ddebug_lock);
list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) {
- if (!strcmp(dt->mod_name, mod_name)) {
+ if (dt->mod_name == mod_name) {
ddebug_table_free(dt);
ret = 0;
+ break;
}
}
mutex_unlock(&ddebug_lock);
return ret;
}
-EXPORT_SYMBOL_GPL(ddebug_remove_module);
static void ddebug_remove_all_tables(void)
{
diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c
index 5367ffa5c18f..f0e394dd2beb 100644
--- a/lib/find_bit_benchmark.c
+++ b/lib/find_bit_benchmark.c
@@ -108,14 +108,13 @@ static int __init test_find_next_and_bit(const void *bitmap,
const void *bitmap2, unsigned long len)
{
unsigned long i, cnt;
- cycles_t cycles;
+ ktime_t time;
- cycles = get_cycles();
+ time = ktime_get();
for (cnt = i = 0; i < BITMAP_LEN; cnt++)
- i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i+1);
- cycles = get_cycles() - cycles;
- pr_err("find_next_and_bit:\t\t%llu cycles, %ld iterations\n",
- (u64)cycles, cnt);
+ i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i + 1);
+ time = ktime_get() - time;
+ pr_err("find_next_and_bit: %18llu ns, %6ld iterations\n", time, cnt);
return 0;
}
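Editor's note on the find_bit_benchmark.c hunk above: it replaces raw cycle counting with ktime_get(), which reports monotonic nanoseconds and behaves the same on every architecture. A rough userspace analogue of that before/after timestamp pattern, using POSIX clock_gettime() as a stand-in for ktime_get() (illustrative only):

#include <stdio.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	volatile unsigned long sink = 0;
	long long t = now_ns();

	for (unsigned long i = 0; i < 1000000; i++)
		sink += i;	/* stand-in for the find_next_and_bit() loop */

	t = now_ns() - t;
	printf("loop took %lld ns (sink=%lu)\n", t, (unsigned long)sink);
	return 0;
}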
diff --git a/lib/flex_array.c b/lib/flex_array.c
deleted file mode 100644
index 2eed22fa507c..000000000000
--- a/lib/flex_array.c
+++ /dev/null
@@ -1,398 +0,0 @@
-/*
- * Flexible array managed in PAGE_SIZE parts
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright IBM Corporation, 2009
- *
- * Author: Dave Hansen <dave@linux.vnet.ibm.com>
- */
-
-#include <linux/flex_array.h>
-#include <linux/slab.h>
-#include <linux/stddef.h>
-#include <linux/export.h>
-#include <linux/reciprocal_div.h>
-
-struct flex_array_part {
- char elements[FLEX_ARRAY_PART_SIZE];
-};
-
-/*
- * If a user requests an allocation which is small
- * enough, we may simply use the space in the
- * flex_array->parts[] array to store the user
- * data.
- */
-static inline int elements_fit_in_base(struct flex_array *fa)
-{
- int data_size = fa->element_size * fa->total_nr_elements;
- if (data_size <= FLEX_ARRAY_BASE_BYTES_LEFT)
- return 1;
- return 0;
-}
-
-/**
- * flex_array_alloc - allocate a new flexible array
- * @element_size: the size of individual elements in the array
- * @total: total number of elements that this should hold
- * @flags: page allocation flags to use for base array
- *
- * Note: all locking must be provided by the caller.
- *
- * @total is used to size internal structures. If the user ever
- * accesses any array indexes >=@total, it will produce errors.
- *
- * The maximum number of elements is defined as: the number of
- * elements that can be stored in a page times the number of
- * page pointers that we can fit in the base structure or (using
- * integer math):
- *
- * (PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *)
- *
- * Here's a table showing example capacities. Note that the maximum
- * index that the get/put() functions is just nr_objects-1. This
- * basically means that you get 4MB of storage on 32-bit and 2MB on
- * 64-bit.
- *
- *
- * Element size | Objects | Objects |
- * PAGE_SIZE=4k | 32-bit | 64-bit |
- * ---------------------------------|
- * 1 bytes | 4177920 | 2088960 |
- * 2 bytes | 2088960 | 1044480 |
- * 3 bytes | 1392300 | 696150 |
- * 4 bytes | 1044480 | 522240 |
- * 32 bytes | 130560 | 65408 |
- * 33 bytes | 126480 | 63240 |
- * 2048 bytes | 2040 | 1020 |
- * 2049 bytes | 1020 | 510 |
- * void * | 1044480 | 261120 |
- *
- * Since 64-bit pointers are twice the size, we lose half the
- * capacity in the base structure. Also note that no effort is made
- * to efficiently pack objects across page boundaries.
- */
-struct flex_array *flex_array_alloc(int element_size, unsigned int total,
- gfp_t flags)
-{
- struct flex_array *ret;
- int elems_per_part = 0;
- int max_size = 0;
- struct reciprocal_value reciprocal_elems = { 0 };
-
- if (element_size) {
- elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
- reciprocal_elems = reciprocal_value(elems_per_part);
- max_size = FLEX_ARRAY_NR_BASE_PTRS * elems_per_part;
- }
-
- /* max_size will end up 0 if element_size > PAGE_SIZE */
- if (total > max_size)
- return NULL;
- ret = kzalloc(sizeof(struct flex_array), flags);
- if (!ret)
- return NULL;
- ret->element_size = element_size;
- ret->total_nr_elements = total;
- ret->elems_per_part = elems_per_part;
- ret->reciprocal_elems = reciprocal_elems;
- if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
- memset(&ret->parts[0], FLEX_ARRAY_FREE,
- FLEX_ARRAY_BASE_BYTES_LEFT);
- return ret;
-}
-EXPORT_SYMBOL(flex_array_alloc);
-
-static int fa_element_to_part_nr(struct flex_array *fa,
- unsigned int element_nr)
-{
- /*
- * if element_size == 0 we don't get here, so we never touch
- * the zeroed fa->reciprocal_elems, which would yield invalid
- * results
- */
- return reciprocal_divide(element_nr, fa->reciprocal_elems);
-}
-
-/**
- * flex_array_free_parts - just free the second-level pages
- * @fa: the flex array from which to free parts
- *
- * This is to be used in cases where the base 'struct flex_array'
- * has been statically allocated and should not be free.
- */
-void flex_array_free_parts(struct flex_array *fa)
-{
- int part_nr;
-
- if (elements_fit_in_base(fa))
- return;
- for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
- kfree(fa->parts[part_nr]);
-}
-EXPORT_SYMBOL(flex_array_free_parts);
-
-void flex_array_free(struct flex_array *fa)
-{
- flex_array_free_parts(fa);
- kfree(fa);
-}
-EXPORT_SYMBOL(flex_array_free);
-
-static unsigned int index_inside_part(struct flex_array *fa,
- unsigned int element_nr,
- unsigned int part_nr)
-{
- unsigned int part_offset;
-
- part_offset = element_nr - part_nr * fa->elems_per_part;
- return part_offset * fa->element_size;
-}
-
-static struct flex_array_part *
-__fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
-{
- struct flex_array_part *part = fa->parts[part_nr];
- if (!part) {
- part = kmalloc(sizeof(struct flex_array_part), flags);
- if (!part)
- return NULL;
- if (!(flags & __GFP_ZERO))
- memset(part, FLEX_ARRAY_FREE,
- sizeof(struct flex_array_part));
- fa->parts[part_nr] = part;
- }
- return part;
-}
-
-/**
- * flex_array_put - copy data into the array at @element_nr
- * @fa: the flex array to copy data into
- * @element_nr: index of the position in which to insert
- * the new element.
- * @src: address of data to copy into the array
- * @flags: page allocation flags to use for array expansion
- *
- *
- * Note that this *copies* the contents of @src into
- * the array. If you are trying to store an array of
- * pointers, make sure to pass in &ptr instead of ptr.
- * You may instead wish to use the flex_array_put_ptr()
- * helper function.
- *
- * Locking must be provided by the caller.
- */
-int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
- gfp_t flags)
-{
- int part_nr = 0;
- struct flex_array_part *part;
- void *dst;
-
- if (element_nr >= fa->total_nr_elements)
- return -ENOSPC;
- if (!fa->element_size)
- return 0;
- if (elements_fit_in_base(fa))
- part = (struct flex_array_part *)&fa->parts[0];
- else {
- part_nr = fa_element_to_part_nr(fa, element_nr);
- part = __fa_get_part(fa, part_nr, flags);
- if (!part)
- return -ENOMEM;
- }
- dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
- memcpy(dst, src, fa->element_size);
- return 0;
-}
-EXPORT_SYMBOL(flex_array_put);
-
-/**
- * flex_array_clear - clear element in array at @element_nr
- * @fa: the flex array of the element.
- * @element_nr: index of the position to clear.
- *
- * Locking must be provided by the caller.
- */
-int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
-{
- int part_nr = 0;
- struct flex_array_part *part;
- void *dst;
-
- if (element_nr >= fa->total_nr_elements)
- return -ENOSPC;
- if (!fa->element_size)
- return 0;
- if (elements_fit_in_base(fa))
- part = (struct flex_array_part *)&fa->parts[0];
- else {
- part_nr = fa_element_to_part_nr(fa, element_nr);
- part = fa->parts[part_nr];
- if (!part)
- return -EINVAL;
- }
- dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
- memset(dst, FLEX_ARRAY_FREE, fa->element_size);
- return 0;
-}
-EXPORT_SYMBOL(flex_array_clear);
-
-/**
- * flex_array_prealloc - guarantee that array space exists
- * @fa: the flex array for which to preallocate parts
- * @start: index of first array element for which space is allocated
- * @nr_elements: number of elements for which space is allocated
- * @flags: page allocation flags
- *
- * This will guarantee that no future calls to flex_array_put()
- * will allocate memory. It can be used if you are expecting to
- * be holding a lock or in some atomic context while writing
- * data into the array.
- *
- * Locking must be provided by the caller.
- */
-int flex_array_prealloc(struct flex_array *fa, unsigned int start,
- unsigned int nr_elements, gfp_t flags)
-{
- int start_part;
- int end_part;
- int part_nr;
- unsigned int end;
- struct flex_array_part *part;
-
- if (!start && !nr_elements)
- return 0;
- if (start >= fa->total_nr_elements)
- return -ENOSPC;
- if (!nr_elements)
- return 0;
-
- end = start + nr_elements - 1;
-
- if (end >= fa->total_nr_elements)
- return -ENOSPC;
- if (!fa->element_size)
- return 0;
- if (elements_fit_in_base(fa))
- return 0;
- start_part = fa_element_to_part_nr(fa, start);
- end_part = fa_element_to_part_nr(fa, end);
- for (part_nr = start_part; part_nr <= end_part; part_nr++) {
- part = __fa_get_part(fa, part_nr, flags);
- if (!part)
- return -ENOMEM;
- }
- return 0;
-}
-EXPORT_SYMBOL(flex_array_prealloc);
-
-/**
- * flex_array_get - pull data back out of the array
- * @fa: the flex array from which to extract data
- * @element_nr: index of the element to fetch from the array
- *
- * Returns a pointer to the data at index @element_nr. Note
- * that this is a copy of the data that was passed in. If you
- * are using this to store pointers, you'll get back &ptr. You
- * may instead wish to use the flex_array_get_ptr helper.
- *
- * Locking must be provided by the caller.
- */
-void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
-{
- int part_nr = 0;
- struct flex_array_part *part;
-
- if (!fa->element_size)
- return NULL;
- if (element_nr >= fa->total_nr_elements)
- return NULL;
- if (elements_fit_in_base(fa))
- part = (struct flex_array_part *)&fa->parts[0];
- else {
- part_nr = fa_element_to_part_nr(fa, element_nr);
- part = fa->parts[part_nr];
- if (!part)
- return NULL;
- }
- return &part->elements[index_inside_part(fa, element_nr, part_nr)];
-}
-EXPORT_SYMBOL(flex_array_get);
-
-/**
- * flex_array_get_ptr - pull a ptr back out of the array
- * @fa: the flex array from which to extract data
- * @element_nr: index of the element to fetch from the array
- *
- * Returns the pointer placed in the flex array at element_nr using
- * flex_array_put_ptr(). This function should not be called if the
- * element in question was not set using the _put_ptr() helper.
- */
-void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr)
-{
- void **tmp;
-
- tmp = flex_array_get(fa, element_nr);
- if (!tmp)
- return NULL;
-
- return *tmp;
-}
-EXPORT_SYMBOL(flex_array_get_ptr);
-
-static int part_is_free(struct flex_array_part *part)
-{
- int i;
-
- for (i = 0; i < sizeof(struct flex_array_part); i++)
- if (part->elements[i] != FLEX_ARRAY_FREE)
- return 0;
- return 1;
-}
-
-/**
- * flex_array_shrink - free unused second-level pages
- * @fa: the flex array to shrink
- *
- * Frees all second-level pages that consist solely of unused
- * elements. Returns the number of pages freed.
- *
- * Locking must be provided by the caller.
- */
-int flex_array_shrink(struct flex_array *fa)
-{
- struct flex_array_part *part;
- int part_nr;
- int ret = 0;
-
- if (!fa->total_nr_elements || !fa->element_size)
- return 0;
- if (elements_fit_in_base(fa))
- return ret;
- for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {
- part = fa->parts[part_nr];
- if (!part)
- continue;
- if (part_is_free(part)) {
- fa->parts[part_nr] = NULL;
- kfree(part);
- ret++;
- }
- }
- return ret;
-}
-EXPORT_SYMBOL(flex_array_shrink);
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index 8fa0791e8a1e..3ecdd5204ec5 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -109,6 +109,15 @@ config FONT_SUN12x22
big letters (like the letters used in the SPARC PROM). If the
standard font is unreadable for you, say Y, otherwise say N.
+config FONT_TER16x32
+ bool "Terminus 16x32 font (not supported by all drivers)"
+ depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC)
+ help
+ Terminus Font is a clean, fixed width bitmap font, designed
+ for long (8 and more hours per day) work with computers.
+ This is the high resolution, large version for use with HiDPI screens.
+ If the standard font is unreadable for you, say Y, otherwise say N.
+
config FONT_AUTOSELECT
def_bool y
depends on !FONT_8x8
@@ -121,6 +130,7 @@ config FONT_AUTOSELECT
depends on !FONT_SUN8x16
depends on !FONT_SUN12x22
depends on !FONT_10x18
+ depends on !FONT_TER16x32
select FONT_8x16
endif # FONT_SUPPORT
diff --git a/lib/fonts/Makefile b/lib/fonts/Makefile
index d56f02dea83a..ed95070860de 100644
--- a/lib/fonts/Makefile
+++ b/lib/fonts/Makefile
@@ -14,6 +14,7 @@ font-objs-$(CONFIG_FONT_PEARL_8x8) += font_pearl_8x8.o
font-objs-$(CONFIG_FONT_ACORN_8x8) += font_acorn_8x8.o
font-objs-$(CONFIG_FONT_MINI_4x6) += font_mini_4x6.o
font-objs-$(CONFIG_FONT_6x10) += font_6x10.o
+font-objs-$(CONFIG_FONT_TER16x32) += font_ter16x32.o
font-objs += $(font-objs-y)
diff --git a/lib/fonts/font_ter16x32.c b/lib/fonts/font_ter16x32.c
new file mode 100644
index 000000000000..3f0cf1ccdf3a
--- /dev/null
+++ b/lib/fonts/font_ter16x32.c
@@ -0,0 +1,2072 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/font.h>
+#include <linux/module.h>
+
+#define FONTDATAMAX 16384
+
+static const unsigned char fontdata_ter16x32[FONTDATAMAX] = {
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8, 0x7f, 0xfc,
+ 0xf0, 0x1e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e,
+ 0xee, 0xee, 0xee, 0xee, 0xe0, 0x0e, 0xe0, 0x0e,
+ 0xe0, 0x0e, 0xe0, 0x0e, 0xef, 0xee, 0xe7, 0xce,
+ 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xf0, 0x1e,
+ 0x7f, 0xfc, 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 1 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8, 0x7f, 0xfc,
+ 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+ 0xe3, 0x8e, 0xe3, 0x8e, 0xff, 0xfe, 0xff, 0xfe,
+ 0xff, 0xfe, 0xff, 0xfe, 0xe0, 0x0e, 0xf0, 0x1e,
+ 0xf8, 0x3e, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+ 0x7f, 0xfc, 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 2 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x78, 0x3c, 0xfc, 0x7e, 0xfe, 0xfe, 0xff, 0xfe,
+ 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x3f, 0xf8, 0x1f, 0xf0,
+ 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 3 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, 0x0f, 0xe0,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x7f, 0xfc, 0xff, 0xfe,
+ 0xff, 0xfe, 0x7f, 0xfc, 0x3f, 0xf8, 0x1f, 0xf0,
+ 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 4 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0xc0, 0x0f, 0xe0,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x0f, 0xe0, 0x0f, 0xe0,
+ 0x07, 0xc0, 0x03, 0x80, 0x3b, 0xb8, 0x7f, 0xfc,
+ 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
+ 0x7f, 0xfc, 0x3b, 0xb8, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 5 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x07, 0xc0, 0x0f, 0xe0, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0xff, 0xfe, 0xff, 0xfe,
+ 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0x7b, 0xbc,
+ 0x3b, 0xb8, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0xc0, 0x07, 0xe0, 0x0f, 0xf0, 0x0f, 0xf0,
+ 0x0f, 0xf0, 0x0f, 0xf0, 0x07, 0xe0, 0x03, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xfc, 0x3f, 0xf8, 0x1f, 0xf0, 0x0f, 0xf0, 0x0f,
+ 0xf0, 0x0f, 0xf0, 0x0f, 0xf8, 0x1f, 0xfc, 0x3f,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 8 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0xc0, 0x07, 0xe0, 0x0e, 0x70, 0x0c, 0x30,
+ 0x0c, 0x30, 0x0e, 0x70, 0x07, 0xe0, 0x03, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 9 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xfc, 0x3f, 0xf8, 0x1f, 0xf1, 0x8f, 0xf3, 0xcf,
+ 0xf3, 0xcf, 0xf1, 0x8f, 0xf8, 0x1f, 0xfc, 0x3f,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 10 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0xfe, 0x03, 0xfe,
+ 0x00, 0x1e, 0x00, 0x3e, 0x00, 0x76, 0x00, 0xe6,
+ 0x01, 0xc6, 0x03, 0x86, 0x3f, 0xe0, 0x7f, 0xf0,
+ 0xf0, 0x78, 0xe0, 0x38, 0xe0, 0x38, 0xe0, 0x38,
+ 0xe0, 0x38, 0xe0, 0x38, 0xe0, 0x38, 0xf0, 0x78,
+ 0x7f, 0xf0, 0x3f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 11 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8,
+ 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 12 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3f, 0xfc, 0x3f, 0xfc,
+ 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1c,
+ 0x3f, 0xfc, 0x3f, 0xfc, 0x38, 0x00, 0x38, 0x00,
+ 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00,
+ 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00,
+ 0xf8, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 13 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfe, 0x7f, 0xfe,
+ 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
+ 0x7f, 0xfe, 0x7f, 0xfe, 0x70, 0x0e, 0x70, 0x0e,
+ 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
+ 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x3e,
+ 0xf0, 0x3c, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 14 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x73, 0x9c, 0x73, 0x9c,
+ 0x3b, 0xb8, 0x1f, 0xf0, 0x0f, 0xe0, 0x7c, 0x7c,
+ 0x7c, 0x7c, 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8,
+ 0x73, 0x9c, 0x73, 0x9c, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xc0, 0x00, 0xf0, 0x00, 0xfc, 0x00, 0xff, 0x00,
+ 0xff, 0xc0, 0xff, 0xf0, 0xff, 0xfc, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xfc, 0xff, 0xf0, 0xff, 0xc0,
+ 0xff, 0x00, 0xfc, 0x00, 0xf0, 0x00, 0xc0, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 16 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x03, 0x00, 0x0f, 0x00, 0x3f, 0x00, 0xff,
+ 0x03, 0xff, 0x0f, 0xff, 0x3f, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x3f, 0xff, 0x0f, 0xff, 0x03, 0xff,
+ 0x00, 0xff, 0x00, 0x3f, 0x00, 0x0f, 0x00, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 17 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, 0x73, 0x9c,
+ 0x63, 0x8c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x63, 0x8c,
+ 0x73, 0x9c, 0x3b, 0xb8, 0x1f, 0xf0, 0x0f, 0xe0,
+ 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 18 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 19 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xfe, 0x3f, 0xfe,
+ 0x79, 0xce, 0x71, 0xce, 0x71, 0xce, 0x71, 0xce,
+ 0x71, 0xce, 0x71, 0xce, 0x79, 0xce, 0x3f, 0xce,
+ 0x1f, 0xce, 0x01, 0xce, 0x01, 0xce, 0x01, 0xce,
+ 0x01, 0xce, 0x01, 0xce, 0x01, 0xce, 0x01, 0xce,
+ 0x01, 0xce, 0x01, 0xce, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 20 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x07, 0xe0, 0x0f, 0xf0, 0x1e, 0x78, 0x1c, 0x38,
+ 0x1c, 0x00, 0x1e, 0x00, 0x0f, 0xc0, 0x0f, 0xe0,
+ 0x1c, 0xf0, 0x1c, 0x78, 0x1c, 0x38, 0x1c, 0x38,
+ 0x1c, 0x38, 0x1e, 0x38, 0x0f, 0x38, 0x07, 0xf0,
+ 0x03, 0xf0, 0x00, 0x78, 0x00, 0x38, 0x1c, 0x38,
+ 0x1e, 0x78, 0x0f, 0xf0, 0x07, 0xe0, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 21 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfe, 0x7f, 0xfe,
+ 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe,
+ 0x7f, 0xfe, 0x7f, 0xfe, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 22 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, 0x73, 0x9c,
+ 0x63, 0x8c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x63, 0x8c, 0x73, 0x9c, 0x3b, 0xb8,
+ 0x1f, 0xf0, 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, 0x73, 0x9c,
+ 0x63, 0x8c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 24 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x63, 0x8c,
+ 0x73, 0x9c, 0x3b, 0xb8, 0x1f, 0xf0, 0x0f, 0xe0,
+ 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 25 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xc0, 0x00, 0xe0, 0x00, 0x70,
+ 0x00, 0x38, 0x00, 0x1c, 0x7f, 0xfe, 0x7f, 0xfe,
+ 0x7f, 0xfe, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70,
+ 0x00, 0xe0, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 26 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x00, 0x07, 0x00, 0x0e, 0x00,
+ 0x1c, 0x00, 0x38, 0x00, 0x7f, 0xfe, 0x7f, 0xfe,
+ 0x7f, 0xfe, 0x38, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 27 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 28 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x06, 0x60, 0x0e, 0x70, 0x1c, 0x38,
+ 0x38, 0x1c, 0x70, 0x0e, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x70, 0x0e, 0x38, 0x1c, 0x1c, 0x38,
+ 0x0e, 0x70, 0x06, 0x60, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 29 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x80, 0x01, 0x80, 0x03, 0xc0, 0x03, 0xc0,
+ 0x07, 0xe0, 0x07, 0xe0, 0x0f, 0xf0, 0x0f, 0xf0,
+ 0x1f, 0xf8, 0x1f, 0xf8, 0x3f, 0xfc, 0x3f, 0xfc,
+ 0x7f, 0xfe, 0x7f, 0xfe, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0x7f, 0xfe, 0x7f, 0xfe,
+ 0x3f, 0xfc, 0x3f, 0xfc, 0x1f, 0xf8, 0x1f, 0xf8,
+ 0x0f, 0xf0, 0x0f, 0xf0, 0x07, 0xe0, 0x07, 0xe0,
+ 0x03, 0xc0, 0x03, 0xc0, 0x01, 0x80, 0x01, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 31 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 32 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 33 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 34 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 35 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x1f, 0xf0,
+ 0x3f, 0xf8, 0x7b, 0xbc, 0x73, 0x9c, 0x73, 0x80,
+ 0x73, 0x80, 0x73, 0x80, 0x7b, 0x80, 0x3f, 0xf0,
+ 0x1f, 0xf8, 0x03, 0xbc, 0x03, 0x9c, 0x03, 0x9c,
+ 0x03, 0x9c, 0x73, 0x9c, 0x7b, 0xbc, 0x3f, 0xf8,
+ 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 36 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0x1c, 0x3f, 0x9c,
+ 0x3b, 0xb8, 0x3b, 0xb8, 0x3f, 0xf0, 0x1f, 0x70,
+ 0x00, 0xe0, 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0,
+ 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x07, 0x00,
+ 0x0e, 0xf8, 0x0f, 0xfc, 0x1d, 0xdc, 0x1d, 0xdc,
+ 0x39, 0xfc, 0x38, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 37 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0xc0, 0x1f, 0xe0,
+ 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70,
+ 0x38, 0x70, 0x1c, 0xe0, 0x0f, 0xc0, 0x0f, 0x80,
+ 0x1f, 0xce, 0x38, 0xee, 0x70, 0x7c, 0x70, 0x38,
+ 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x78, 0x7c,
+ 0x3f, 0xee, 0x1f, 0xce, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 38 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 39 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x01, 0xc0,
+ 0x03, 0x80, 0x07, 0x00, 0x07, 0x00, 0x0e, 0x00,
+ 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
+ 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
+ 0x0e, 0x00, 0x07, 0x00, 0x07, 0x00, 0x03, 0x80,
+ 0x01, 0xc0, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 40 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x07, 0x00,
+ 0x03, 0x80, 0x01, 0xc0, 0x01, 0xc0, 0x00, 0xe0,
+ 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0,
+ 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0,
+ 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0, 0x03, 0x80,
+ 0x07, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 41 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x38, 0x38, 0x1c, 0x70,
+ 0x0e, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x03, 0x80, 0x07, 0xc0, 0x0e, 0xe0,
+ 0x1c, 0x70, 0x38, 0x38, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 42 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 43 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 44 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 45 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 46 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x70, 0x00, 0x70,
+ 0x00, 0xe0, 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0,
+ 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x07, 0x00,
+ 0x0e, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x1c, 0x00,
+ 0x38, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 47 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
+ 0x70, 0x7c, 0x70, 0xfc, 0x71, 0xdc, 0x73, 0x9c,
+ 0x77, 0x1c, 0x7e, 0x1c, 0x7c, 0x1c, 0x78, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 48 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0x80,
+ 0x0f, 0x80, 0x1f, 0x80, 0x1f, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x1f, 0xf0, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 49 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70,
+ 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00,
+ 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 50 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x3c, 0x0f, 0xf8,
+ 0x0f, 0xf8, 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 51 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x3c,
+ 0x00, 0x7c, 0x00, 0xfc, 0x01, 0xdc, 0x03, 0x9c,
+ 0x07, 0x1c, 0x0e, 0x1c, 0x1c, 0x1c, 0x38, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 52 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x7f, 0xf0, 0x7f, 0xf8,
+ 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x00, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 53 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf8, 0x3f, 0xf8,
+ 0x78, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x7f, 0xf0, 0x7f, 0xf8,
+ 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 54 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x38,
+ 0x00, 0x38, 0x00, 0x70, 0x00, 0x70, 0x00, 0xe0,
+ 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 55 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8,
+ 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 56 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x3c,
+ 0x3f, 0xf8, 0x3f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 58 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 59 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x38,
+ 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80,
+ 0x07, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00,
+ 0x38, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00,
+ 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0, 0x00, 0x70,
+ 0x00, 0x38, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 61 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x1c, 0x00,
+ 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0,
+ 0x00, 0xe0, 0x00, 0x70, 0x00, 0x38, 0x00, 0x1c,
+ 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0xe0,
+ 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00,
+ 0x1c, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 62 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0xe0,
+ 0x01, 0xc0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 63 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf8, 0x3f, 0xfc,
+ 0x78, 0x0e, 0x70, 0x06, 0x71, 0xfe, 0x73, 0xfe,
+ 0x77, 0x8e, 0x77, 0x0e, 0x77, 0x0e, 0x77, 0x0e,
+ 0x77, 0x0e, 0x77, 0x0e, 0x77, 0x0e, 0x77, 0x9e,
+ 0x73, 0xfe, 0x71, 0xf6, 0x70, 0x00, 0x78, 0x00,
+ 0x3f, 0xfe, 0x1f, 0xfe, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 64 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 65 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xf0, 0x7f, 0xf8,
+ 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x38, 0x7f, 0xf0, 0x7f, 0xf0,
+ 0x70, 0x38, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
+ 0x7f, 0xf8, 0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 66 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xc0, 0x7f, 0xf0,
+ 0x70, 0x78, 0x70, 0x38, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x38, 0x70, 0x78,
+ 0x7f, 0xf0, 0x7f, 0xc0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 68 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xe0,
+ 0x7f, 0xe0, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 69 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xe0,
+ 0x7f, 0xe0, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x71, 0xfc,
+ 0x71, 0xfc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 71 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 72 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0xe0, 0x0f, 0xe0,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 73 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0xfe,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
+ 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x78, 0x78,
+ 0x3f, 0xf0, 0x1f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 74 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x0c, 0x70, 0x1c,
+ 0x70, 0x38, 0x70, 0x70, 0x70, 0xe0, 0x71, 0xc0,
+ 0x73, 0x80, 0x77, 0x00, 0x7e, 0x00, 0x7c, 0x00,
+ 0x7c, 0x00, 0x7e, 0x00, 0x77, 0x00, 0x73, 0x80,
+ 0x71, 0xc0, 0x70, 0xe0, 0x70, 0x70, 0x70, 0x38,
+ 0x70, 0x1c, 0x70, 0x0c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 75 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 76 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x0e, 0x70, 0x0e,
+ 0x78, 0x1e, 0x7c, 0x3e, 0x7e, 0x7e, 0x7e, 0x7e,
+ 0x77, 0xee, 0x73, 0xce, 0x73, 0xce, 0x71, 0x8e,
+ 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
+ 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
+ 0x70, 0x0e, 0x70, 0x0e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x7c, 0x1c, 0x7e, 0x1c, 0x77, 0x1c, 0x73, 0x9c,
+ 0x71, 0xdc, 0x70, 0xfc, 0x70, 0x7c, 0x70, 0x3c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 78 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 79 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xf0, 0x7f, 0xf8,
+ 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
+ 0x7f, 0xf8, 0x7f, 0xf0, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 80 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x73, 0x9c, 0x79, 0xfc,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x38, 0x00, 0x1c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 81 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xf0, 0x7f, 0xf8,
+ 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
+ 0x7f, 0xf8, 0x7f, 0xf0, 0x7e, 0x00, 0x77, 0x00,
+ 0x73, 0x80, 0x71, 0xc0, 0x70, 0xe0, 0x70, 0x70,
+ 0x70, 0x38, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 82 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x78, 0x00, 0x3f, 0xf0,
+ 0x1f, 0xf8, 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 83 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 84 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 85 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x38, 0x38,
+ 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38,
+ 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0,
+ 0x07, 0xc0, 0x07, 0xc0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 86 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x0e, 0x70, 0x0e,
+ 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
+ 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
+ 0x71, 0x8e, 0x73, 0xce, 0x73, 0xce, 0x77, 0xee,
+ 0x7e, 0x7e, 0x7e, 0x7e, 0x7c, 0x3e, 0x78, 0x1e,
+ 0x70, 0x0e, 0x70, 0x0e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 87 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70,
+ 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0, 0x07, 0xc0,
+ 0x07, 0xc0, 0x07, 0xc0, 0x0e, 0xe0, 0x0e, 0xe0,
+ 0x1c, 0x70, 0x1c, 0x70, 0x38, 0x38, 0x38, 0x38,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 88 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70,
+ 0x1c, 0x70, 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0,
+ 0x07, 0xc0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 89 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x38,
+ 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80,
+ 0x07, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 90 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0xf0, 0x0f, 0xf0,
+ 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
+ 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
+ 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
+ 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
+ 0x0f, 0xf0, 0x0f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 91 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x38, 0x00,
+ 0x1c, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x07, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x01, 0xc0, 0x01, 0xc0, 0x00, 0xe0, 0x00, 0xe0,
+ 0x00, 0x70, 0x00, 0x70, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 92 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0xf0, 0x0f, 0xf0,
+ 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70,
+ 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70,
+ 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70,
+ 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70,
+ 0x0f, 0xf0, 0x0f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 93 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x80, 0x07, 0xc0, 0x0e, 0xe0, 0x1c, 0x70,
+ 0x38, 0x38, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 94 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 95 */
+ 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00,
+ 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 96 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 97 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
+ 0x7f, 0xf8, 0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 98 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 99 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c,
+ 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 100 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 101 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x01, 0xfe,
+ 0x03, 0xc0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x3f, 0xf8, 0x3f, 0xf8, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 102 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x3c, 0x3f, 0xf8, 0x3f, 0xf0, 0x00, 0x00, /* 103 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 104 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 105 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xf8, 0x00, 0xf8, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x38, 0x00, 0x38, 0x38, 0x38, 0x38, 0x38,
+ 0x3c, 0x78, 0x1f, 0xf0, 0x0f, 0xe0, 0x00, 0x00, /* 106 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x38, 0x00,
+ 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00,
+ 0x38, 0x1c, 0x38, 0x38, 0x38, 0x70, 0x38, 0xe0,
+ 0x39, 0xc0, 0x3b, 0x80, 0x3f, 0x00, 0x3f, 0x00,
+ 0x3b, 0x80, 0x39, 0xc0, 0x38, 0xe0, 0x38, 0x70,
+ 0x38, 0x38, 0x38, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 107 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0x80, 0x0f, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 108 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xf0, 0x7f, 0xf8, 0x73, 0xbc, 0x73, 0x9c,
+ 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c,
+ 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c,
+ 0x73, 0x9c, 0x73, 0x9c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 109 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 110 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 111 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
+ 0x7f, 0xf8, 0x7f, 0xf0, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, /* 112 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x00, /* 113 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x73, 0xfc, 0x77, 0xfc, 0x7e, 0x00, 0x7c, 0x00,
+ 0x78, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 114 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x00,
+ 0x70, 0x00, 0x78, 0x00, 0x3f, 0xf0, 0x1f, 0xf8,
+ 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 115 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x07, 0x00,
+ 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00,
+ 0x7f, 0xf0, 0x7f, 0xf0, 0x07, 0x00, 0x07, 0x00,
+ 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00,
+ 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x80,
+ 0x03, 0xfc, 0x01, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 116 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 117 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x38, 0x38,
+ 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0,
+ 0x07, 0xc0, 0x07, 0xc0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 118 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c,
+ 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x7b, 0xbc,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 119 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x38, 0x38,
+ 0x1c, 0x70, 0x0e, 0xe0, 0x07, 0xc0, 0x07, 0xc0,
+ 0x0e, 0xe0, 0x1c, 0x70, 0x38, 0x38, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 120 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x3c, 0x3f, 0xf8, 0x3f, 0xf0, 0x00, 0x00, /* 121 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x38, 0x00, 0x70,
+ 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00,
+ 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 122 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xf0, 0x03, 0xf0,
+ 0x07, 0x80, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00,
+ 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x3e, 0x00,
+ 0x3e, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00,
+ 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x80,
+ 0x03, 0xf0, 0x01, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 123 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 124 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x3f, 0x00,
+ 0x07, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x01, 0xf0,
+ 0x01, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x07, 0x80,
+ 0x3f, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 125 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1e, 0x1c, 0x3f, 0x1c, 0x77, 0x9c, 0x73, 0xdc,
+ 0x71, 0xf8, 0x70, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 126 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0f, 0xe0, 0x1e, 0xf0, 0x3c, 0x78, 0x78, 0x3c,
+ 0xf0, 0x1e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e,
+ 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e,
+ 0xff, 0xfe, 0xff, 0xfe, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 127 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x00, 0x00, /* 128 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 129 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0,
+ 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 130 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 131 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 132 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 133 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0xc0, 0x0e, 0xe0,
+ 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0, 0x00, 0x00,
+ 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 134 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x00, 0x00, /* 135 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 136 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 137 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 138 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 139 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 140 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 141 */
+ 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 142 */
+ 0x00, 0x00, 0x07, 0xc0, 0x0e, 0xe0, 0x0e, 0xe0,
+ 0x0e, 0xe0, 0x07, 0xc0, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 143 */
+ 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0,
+ 0x03, 0x80, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xe0,
+ 0x7f, 0xe0, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 144 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xf8, 0x7f, 0xfc, 0x03, 0x9e, 0x03, 0x8e,
+ 0x03, 0x8e, 0x3f, 0x8e, 0x7f, 0xfe, 0xf3, 0xfe,
+ 0xe3, 0x80, 0xe3, 0x80, 0xe3, 0x80, 0xf3, 0xce,
+ 0x7f, 0xfe, 0x3e, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 145 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3f, 0xfe, 0x7f, 0xfe,
+ 0xf1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0,
+ 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xff, 0xfe,
+ 0xff, 0xfe, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0,
+ 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0,
+ 0xe1, 0xfe, 0xe1, 0xfe, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 146 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 147 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 148 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 149 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
+ 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 150 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 151 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x3c, 0x3f, 0xf8, 0x3f, 0xf0, 0x00, 0x00, /* 152 */
+ 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 153 */
+ 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 154 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x7b, 0xbc, 0x73, 0x9c,
+ 0x73, 0x80, 0x73, 0x80, 0x73, 0x80, 0x73, 0x80,
+ 0x73, 0x80, 0x73, 0x80, 0x73, 0x9c, 0x7b, 0xbc,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 155 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0xe0, 0x0f, 0xf0,
+ 0x1e, 0x78, 0x1c, 0x38, 0x1c, 0x00, 0x1c, 0x00,
+ 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x7f, 0xe0,
+ 0x7f, 0xe0, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00,
+ 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x1c, 0x1c, 0x1c,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 156 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70,
+ 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0, 0x07, 0xc0,
+ 0x03, 0x80, 0x03, 0x80, 0x3f, 0xf8, 0x3f, 0xf8,
+ 0x03, 0x80, 0x03, 0x80, 0x3f, 0xf8, 0x3f, 0xf8,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 157 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 0x80,
+ 0xe3, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0,
+ 0xe1, 0xc0, 0xe1, 0xc0, 0xe3, 0xc0, 0xff, 0xf0,
+ 0xff, 0x70, 0xe0, 0x70, 0xe3, 0xfe, 0xe3, 0xfe,
+ 0xe0, 0x70, 0xe0, 0x70, 0xe0, 0x70, 0xe0, 0x70,
+ 0xe0, 0x7e, 0xe0, 0x3e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 158 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xf8, 0x03, 0xfc,
+ 0x03, 0x9c, 0x03, 0x9c, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x1f, 0xf0, 0x1f, 0xf0,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x73, 0x80, 0x73, 0x80,
+ 0x7f, 0x80, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 159 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0,
+ 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 160 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0,
+ 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 161 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0,
+ 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 162 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0,
+ 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 163 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0x38, 0x3b, 0xb8,
+ 0x3b, 0xb8, 0x39, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 164 */
+ 0x00, 0x00, 0x1f, 0x38, 0x3b, 0xb8, 0x3b, 0xb8,
+ 0x39, 0xf0, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
+ 0x7c, 0x1c, 0x7e, 0x1c, 0x77, 0x1c, 0x73, 0x9c,
+ 0x71, 0xdc, 0x70, 0xfc, 0x70, 0x7c, 0x70, 0x3c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 165 */
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xe0, 0x1f, 0xf0,
+ 0x00, 0x38, 0x00, 0x38, 0x0f, 0xf8, 0x1f, 0xf8,
+ 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x1f, 0xf8,
+ 0x0f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8,
+ 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 166 */
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0xe0, 0x1f, 0xf0,
+ 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38,
+ 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x1f, 0xf0,
+ 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8,
+ 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 167 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x07, 0x00,
+ 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 168 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 169 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 170 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x3c, 0x00,
+ 0x7c, 0x06, 0x1c, 0x0e, 0x1c, 0x1c, 0x1c, 0x38,
+ 0x1c, 0x70, 0x1c, 0xe0, 0x1d, 0xc0, 0x03, 0x80,
+ 0x07, 0x00, 0x0e, 0xfc, 0x1d, 0xfe, 0x39, 0xce,
+ 0x71, 0xce, 0x60, 0x1c, 0x00, 0x38, 0x00, 0x70,
+ 0x00, 0xfe, 0x01, 0xfe, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 171 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x1e, 0x00,
+ 0x3e, 0x00, 0x0e, 0x00, 0x0e, 0x06, 0x0e, 0x0e,
+ 0x0e, 0x1c, 0x0e, 0x38, 0x0e, 0x70, 0x00, 0xe0,
+ 0x01, 0xce, 0x03, 0x9e, 0x07, 0x3e, 0x0e, 0x7e,
+ 0x1c, 0xee, 0x39, 0xce, 0x73, 0xfe, 0x63, 0xfe,
+ 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 172 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 173 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0xce, 0x03, 0x9c, 0x07, 0x38, 0x0e, 0x70,
+ 0x1c, 0xe0, 0x39, 0xc0, 0x73, 0x80, 0x73, 0x80,
+ 0x39, 0xc0, 0x1c, 0xe0, 0x0e, 0x70, 0x07, 0x38,
+ 0x03, 0x9c, 0x01, 0xce, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 174 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x73, 0x80, 0x39, 0xc0, 0x1c, 0xe0, 0x0e, 0x70,
+ 0x07, 0x38, 0x03, 0x9c, 0x01, 0xce, 0x01, 0xce,
+ 0x03, 0x9c, 0x07, 0x38, 0x0e, 0x70, 0x1c, 0xe0,
+ 0x39, 0xc0, 0x73, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 175 */
+ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
+ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
+ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
+ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
+ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
+ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
+ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
+ 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, /* 176 */
+ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
+ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
+ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
+ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
+ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
+ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
+ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
+ 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, /* 177 */
+ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
+ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
+ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
+ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
+ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
+ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
+ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
+ 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, /* 178 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 179 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0xff, 0x80, 0xff, 0x80,
+ 0xff, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 180 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 181 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0xfe, 0x70, 0xfe, 0x70,
+ 0xfe, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 182 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xf0, 0xff, 0xf0,
+ 0xff, 0xf0, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 183 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 184 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0x00, 0x70,
+ 0x00, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 185 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 186 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xf0, 0xff, 0xf0, 0xff, 0xf0, 0x00, 0x70,
+ 0x00, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 187 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0x00, 0x70,
+ 0x00, 0x70, 0xff, 0xf0, 0xff, 0xf0, 0xff, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 188 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0xff, 0xf0, 0xff, 0xf0,
+ 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 189 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 190 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x80, 0xff, 0x80,
+ 0xff, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 191 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0xff, 0x03, 0xff,
+ 0x03, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 192 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 193 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 194 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0xff, 0x03, 0xff,
+ 0x03, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 195 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 196 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 197 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 198 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x7f, 0x0e, 0x7f,
+ 0x0e, 0x7f, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 199 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x00,
+ 0x0e, 0x00, 0x0f, 0xff, 0x0f, 0xff, 0x0f, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 200 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0xff, 0x0f, 0xff, 0x0f, 0xff, 0x0e, 0x00,
+ 0x0e, 0x00, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 201 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 202 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0x00, 0x00, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 203 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x00,
+ 0x0e, 0x00, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 204 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 205 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0x00, 0x00,
+ 0x00, 0x00, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 206 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 207 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 208 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 209 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 210 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0f, 0xff, 0x0f, 0xff,
+ 0x0f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 211 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 212 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 213 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0xff,
+ 0x0f, 0xff, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 214 */
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
+ 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 215 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x80,
+ 0x03, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 216 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0xff, 0x80, 0xff, 0x80,
+ 0xff, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 217 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0xff, 0x03, 0xff,
+ 0x03, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 218 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 219 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 220 */
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
+ 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, /* 221 */
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
+ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, /* 222 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 223 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xee, 0x3f, 0xfe, 0x78, 0x3c, 0x70, 0x38,
+ 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38,
+ 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x78, 0x3c,
+ 0x3f, 0xfe, 0x1f, 0xee, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 224 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x3f, 0xe0, 0x7f, 0xf0,
+ 0x70, 0x78, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38,
+ 0x70, 0x38, 0x70, 0x70, 0x7f, 0xf0, 0x7f, 0xf0,
+ 0x70, 0x38, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
+ 0x7f, 0xf8, 0x7f, 0xf0, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, /* 225 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 226 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 227 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
+ 0x70, 0x00, 0x38, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0,
+ 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00,
+ 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 228 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xfe, 0x3f, 0xfe, 0x78, 0xf0, 0x70, 0x78,
+ 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 229 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x3c, 0x70, 0x7c, 0x70, 0xfc,
+ 0x7f, 0xdc, 0x7f, 0x9c, 0x70, 0x00, 0x70, 0x00,
+ 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, /* 230 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0xc0,
+ 0x01, 0xf8, 0x00, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 231 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x1f, 0xf0, 0x3f, 0xf8, 0x7b, 0xbc, 0x73, 0x9c,
+ 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c,
+ 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c,
+ 0x73, 0x9c, 0x7b, 0xbc, 0x3f, 0xf8, 0x1f, 0xf0,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 232 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x77, 0xdc,
+ 0x77, 0xdc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 233 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
+ 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
+ 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 234 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x1f, 0xf0,
+ 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0,
+ 0x0f, 0xe0, 0x1f, 0xf0, 0x38, 0x38, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
+ 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 235 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0xf8,
+ 0x7f, 0xfc, 0xe7, 0xce, 0xe3, 0x8e, 0xe3, 0x8e,
+ 0xe3, 0x8e, 0xe3, 0x8e, 0xe7, 0xce, 0x7f, 0xfc,
+ 0x3e, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 236 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x1c,
+ 0x00, 0x38, 0x00, 0x38, 0x0f, 0xf0, 0x1f, 0xf8,
+ 0x38, 0xfc, 0x38, 0xfc, 0x39, 0xdc, 0x39, 0xdc,
+ 0x3b, 0x9c, 0x3b, 0x9c, 0x3f, 0x1c, 0x3f, 0x1c,
+ 0x1f, 0xf8, 0x0f, 0xf0, 0x1c, 0x00, 0x1c, 0x00,
+ 0x38, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 237 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x07, 0xfc, 0x1f, 0xfc, 0x3c, 0x00,
+ 0x38, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00, 0x38, 0x00,
+ 0x3c, 0x00, 0x1f, 0xfc, 0x07, 0xfc, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 238 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x07, 0xc0, 0x1f, 0xf0,
+ 0x3c, 0x78, 0x38, 0x38, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
+ 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 239 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 240 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 241 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0,
+ 0x00, 0x70, 0x00, 0x38, 0x00, 0x38, 0x00, 0x70,
+ 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00,
+ 0x0e, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x3f, 0xfc, 0x3f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 242 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x70,
+ 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00,
+ 0x0e, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+ 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0,
+ 0x00, 0x70, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00,
+ 0x3f, 0xfc, 0x3f, 0xfc, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 243 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xf8, 0x03, 0xfc,
+ 0x03, 0x9c, 0x03, 0x9c, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 244 */
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x73, 0x80, 0x73, 0x80,
+ 0x7f, 0x80, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 245 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
+ 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 246 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x1c,
+ 0x7f, 0xbc, 0x7b, 0xfc, 0x70, 0xf8, 0x00, 0x00,
+ 0x00, 0x00, 0x3e, 0x1c, 0x7f, 0xbc, 0x7b, 0xfc,
+ 0x70, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 247 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x0f, 0xe0, 0x1f, 0xf0, 0x1c, 0x70, 0x1c, 0x70,
+ 0x1c, 0x70, 0x1c, 0x70, 0x1f, 0xf0, 0x0f, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 248 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x03, 0xc0, 0x07, 0xe0, 0x07, 0xe0,
+ 0x07, 0xe0, 0x07, 0xe0, 0x03, 0xc0, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 249 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
+ 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 250 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e,
+ 0x00, 0x3e, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
+ 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x70, 0x38,
+ 0x70, 0x38, 0x70, 0x38, 0x78, 0x38, 0x3c, 0x38,
+ 0x1e, 0x38, 0x0f, 0x38, 0x07, 0xb8, 0x03, 0xf8,
+ 0x01, 0xf8, 0x00, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 251 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x1f, 0xe0, 0x1f, 0xf0, 0x1c, 0x38, 0x1c, 0x38,
+ 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38,
+ 0x1c, 0x38, 0x1c, 0x38, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 252 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xe0,
+ 0x1f, 0xf0, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0xe0,
+ 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00,
+ 0x1f, 0xf0, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 253 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf8, 0x1f, 0xf8,
+ 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8,
+ 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8,
+ 0x1f, 0xf8, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 254 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 255 */
+
+};
+
+
+const struct font_desc font_ter_16x32 = {
+ .idx = TER16x32_IDX,
+ .name = "TER16x32",
+ .width = 16,
+ .height = 32,
+ .data = fontdata_ter16x32,
+#ifdef __sparc__
+ .pref = 5,
+#else
+ .pref = -1,
+#endif
+};
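The glyph data above follows the usual lib/fonts packing: the face is 16 pixels wide, so each scanline occupies two bytes, and with 32 scanlines every glyph takes 64 bytes; 256 glyphs therefore form the 16 KiB array that .data points at. A minimal sketch of how a consumer could index that array is shown below; the helper is invented for illustration and is not part of the patch.

/* Illustrative helper only -- not added by this patch. */
static const u8 *ter16x32_glyph(const struct font_desc *font, unsigned char c)
{
        /* 16 px wide -> 2 bytes per row; 32 rows -> 64 bytes per glyph. */
        unsigned int bytes_per_glyph = (font->width + 7) / 8 * font->height;

        return (const u8 *)font->data + c * bytes_per_glyph;
}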
diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
index 823376ca0a8b..9969358a7af5 100644
--- a/lib/fonts/fonts.c
+++ b/lib/fonts/fonts.c
@@ -67,6 +67,10 @@ static const struct font_desc *fonts[] = {
#undef NO_FONTS
&font_6x10,
#endif
+#ifdef CONFIG_FONT_TER16x32
+#undef NO_FONTS
+ &font_ter_16x32,
+#endif
};
#define num_fonts ARRAY_SIZE(fonts)
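With the descriptor hooked into the fonts[] table (guarded by CONFIG_FONT_TER16x32), the new face is reachable through the normal lookup path in this file. A hedged usage sketch, assuming the existing find_font() helper:

        const struct font_desc *font = find_font("TER16x32");

        if (font)
                pr_info("console font %s: %dx%d\n",
                        font->name, font->width, font->height);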
diff --git a/lib/gcd.c b/lib/gcd.c
index 227dea924425..7948ab27f0a4 100644
--- a/lib/gcd.c
+++ b/lib/gcd.c
@@ -10,7 +10,7 @@
* has decent hardware division.
*/
-#if !defined(CONFIG_CPU_NO_EFFICIENT_FFS) && !defined(CPU_NO_EFFICIENT_FFS)
+#if !defined(CONFIG_CPU_NO_EFFICIENT_FFS)
/* If __ffs is available, the even/odd algorithm benchmarks slower. */
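After this change only the Kconfig symbol CONFIG_CPU_NO_EFFICIENT_FFS decides whether gcd() uses the __ffs-based fast path or the even/odd fallback; the bare CPU_NO_EFFICIENT_FFS macro is no longer consulted. For reference, the even/odd (binary) algorithm that the fallback implements looks roughly like the sketch below; this is a simplification, not lib/gcd.c verbatim.

/* Simplified binary GCD sketch (illustrative). */
static unsigned long gcd_binary(unsigned long a, unsigned long b)
{
        unsigned long shift, t;

        if (!a)
                return b;
        if (!b)
                return a;

        /* Factor out common powers of two. */
        for (shift = 0; ((a | b) & 1) == 0; shift++) {
                a >>= 1;
                b >>= 1;
        }
        while ((a & 1) == 0)
                a >>= 1;
        do {
                while ((b & 1) == 0)
                        b >>= 1;
                if (a > b) {
                        t = a;
                        a = b;
                        b = t;
                }
                b -= a;
        } while (b);

        return a << shift;
}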
diff --git a/lib/gen_crc64table.c b/lib/gen_crc64table.c
index 9011926e4162..094b43aef8db 100644
--- a/lib/gen_crc64table.c
+++ b/lib/gen_crc64table.c
@@ -16,8 +16,6 @@
#include <inttypes.h>
#include <stdio.h>
-#include <linux/swab.h>
-
#define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL
static uint64_t crc64_table[256] = {0};
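gen_crc64table.c is a small host-side generator (it emits the CRC-64/ECMA-182 lookup table at build time), so the kernel-internal <linux/swab.h> include is dropped as unneeded there. Roughly, the table it produces is the standard MSB-first one, as in this sketch; the helper name is made up and the real generator is written slightly differently.

/* Illustrative: fill a CRC-64/ECMA-182 (MSB-first) lookup table. */
static void crc64_fill_table(uint64_t table[256])
{
        uint64_t crc;
        int i, j;

        for (i = 0; i < 256; i++) {
                crc = (uint64_t)i << 56;
                for (j = 0; j < 8; j++)
                        crc = (crc << 1) ^
                              ((crc & (1ULL << 63)) ? CRC64_ECMA182_POLY : 0);
                table[i] = crc;
        }
}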
diff --git a/lib/genalloc.c b/lib/genalloc.c
index ca06adc4f445..7e85d1e37a6e 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -35,6 +35,7 @@
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
+#include <linux/vmalloc.h>
static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
@@ -187,7 +188,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
int nbytes = sizeof(struct gen_pool_chunk) +
BITS_TO_LONGS(nbits) * sizeof(long);
- chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
+ chunk = vzalloc_node(nbytes, nid);
if (unlikely(chunk == NULL))
return -ENOMEM;
@@ -251,7 +252,7 @@ void gen_pool_destroy(struct gen_pool *pool)
bit = find_next_bit(chunk->bits, end_bit, 0);
BUG_ON(bit < end_bit);
- kfree(chunk);
+ vfree(chunk);
}
kfree_const(pool->name);
kfree(pool);
@@ -311,7 +312,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
end_bit = chunk_size(chunk) >> order;
retry:
start_bit = algo(chunk->bits, end_bit, start_bit,
- nbits, data, pool);
+ nbits, data, pool, chunk->start_addr);
if (start_bit >= end_bit)
continue;
remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -525,7 +526,7 @@ EXPORT_SYMBOL(gen_pool_set_algo);
*/
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool)
+ struct gen_pool *pool, unsigned long start_addr)
{
return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
@@ -543,16 +544,19 @@ EXPORT_SYMBOL(gen_pool_first_fit);
*/
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool)
+ struct gen_pool *pool, unsigned long start_addr)
{
struct genpool_data_align *alignment;
- unsigned long align_mask;
+ unsigned long align_mask, align_off;
int order;
alignment = data;
order = pool->min_alloc_order;
align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
- return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+ align_off = (start_addr & (alignment->align - 1)) >> order;
+
+ return bitmap_find_next_zero_area_off(map, size, start, nr,
+ align_mask, align_off);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);
@@ -567,7 +571,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_align);
*/
unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool)
+ struct gen_pool *pool, unsigned long start_addr)
{
struct genpool_data_fixed *fixed_data;
int order;
@@ -601,7 +605,8 @@ EXPORT_SYMBOL(gen_pool_fixed_alloc);
*/
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
unsigned long size, unsigned long start,
- unsigned int nr, void *data, struct gen_pool *pool)
+ unsigned int nr, void *data, struct gen_pool *pool,
+ unsigned long start_addr)
{
unsigned long align_mask = roundup_pow_of_two(nr) - 1;
@@ -624,7 +629,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align);
*/
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data,
- struct gen_pool *pool)
+ struct gen_pool *pool, unsigned long start_addr)
{
unsigned long start_bit = size;
unsigned long len = size + 1;
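The allocation-algorithm callbacks gain a start_addr parameter so that gen_pool_first_fit_align() can align the address it hands back, not merely the bit offset inside the chunk: the new align_off term compensates for a chunk whose own start address is not a multiple of the requested alignment. A hedged usage sketch, using the existing genalloc API names and assuming the pool has already been created and populated elsewhere:

        /* Ask for 4 KiB-aligned blocks; with this change the alignment is
         * honoured relative to the chunk's real start address. */
        struct genpool_data_align align_data = { .align = SZ_4K };
        unsigned long vaddr;

        gen_pool_set_algo(pool, gen_pool_first_fit_align, &align_data);
        vaddr = gen_pool_alloc(pool, size);     /* 0 on failure */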
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
new file mode 100644
index 000000000000..a7bafc413730
--- /dev/null
+++ b/lib/generic-radix-tree.c
@@ -0,0 +1,217 @@
+
+#include <linux/export.h>
+#include <linux/generic-radix-tree.h>
+#include <linux/gfp.h>
+
+#define GENRADIX_ARY (PAGE_SIZE / sizeof(struct genradix_node *))
+#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
+
+struct genradix_node {
+ union {
+ /* Interior node: */
+ struct genradix_node *children[GENRADIX_ARY];
+
+ /* Leaf: */
+ u8 data[PAGE_SIZE];
+ };
+};
+
+static inline int genradix_depth_shift(unsigned depth)
+{
+ return PAGE_SHIFT + GENRADIX_ARY_SHIFT * depth;
+}
+
+/*
+ * Returns size (of data, in bytes) that a tree of a given depth holds:
+ */
+static inline size_t genradix_depth_size(unsigned depth)
+{
+ return 1UL << genradix_depth_shift(depth);
+}
+
+/* depth that's needed for a genradix that can address up to ULONG_MAX: */
+#define GENRADIX_MAX_DEPTH \
+ DIV_ROUND_UP(BITS_PER_LONG - PAGE_SHIFT, GENRADIX_ARY_SHIFT)
+
+#define GENRADIX_DEPTH_MASK \
+ ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
+
+unsigned genradix_root_to_depth(struct genradix_root *r)
+{
+ return (unsigned long) r & GENRADIX_DEPTH_MASK;
+}
+
+struct genradix_node *genradix_root_to_node(struct genradix_root *r)
+{
+ return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
+}
+
+/*
+ * Returns pointer to the specified byte @offset within @radix, or NULL if not
+ * allocated
+ */
+void *__genradix_ptr(struct __genradix *radix, size_t offset)
+{
+ struct genradix_root *r = READ_ONCE(radix->root);
+ struct genradix_node *n = genradix_root_to_node(r);
+ unsigned level = genradix_root_to_depth(r);
+
+ if (ilog2(offset) >= genradix_depth_shift(level))
+ return NULL;
+
+ while (1) {
+ if (!n)
+ return NULL;
+ if (!level)
+ break;
+
+ level--;
+
+ n = n->children[offset >> genradix_depth_shift(level)];
+ offset &= genradix_depth_size(level) - 1;
+ }
+
+ return &n->data[offset];
+}
+EXPORT_SYMBOL(__genradix_ptr);
+
+/*
+ * Returns pointer to the specified byte @offset within @radix, allocating it if
+ * necessary - newly allocated slots are always zeroed out:
+ */
+void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
+ gfp_t gfp_mask)
+{
+ struct genradix_root *v = READ_ONCE(radix->root);
+ struct genradix_node *n, *new_node = NULL;
+ unsigned level;
+
+ /* Increase tree depth if necessary: */
+ while (1) {
+ struct genradix_root *r = v, *new_root;
+
+ n = genradix_root_to_node(r);
+ level = genradix_root_to_depth(r);
+
+ if (n && ilog2(offset) < genradix_depth_shift(level))
+ break;
+
+ if (!new_node) {
+ new_node = (void *)
+ __get_free_page(gfp_mask|__GFP_ZERO);
+ if (!new_node)
+ return NULL;
+ }
+
+ new_node->children[0] = n;
+ new_root = ((struct genradix_root *)
+ ((unsigned long) new_node | (n ? level + 1 : 0)));
+
+ if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) {
+ v = new_root;
+ new_node = NULL;
+ }
+ }
+
+ while (level--) {
+ struct genradix_node **p =
+ &n->children[offset >> genradix_depth_shift(level)];
+ offset &= genradix_depth_size(level) - 1;
+
+ n = READ_ONCE(*p);
+ if (!n) {
+ if (!new_node) {
+ new_node = (void *)
+ __get_free_page(gfp_mask|__GFP_ZERO);
+ if (!new_node)
+ return NULL;
+ }
+
+ if (!(n = cmpxchg_release(p, NULL, new_node)))
+ swap(n, new_node);
+ }
+ }
+
+ if (new_node)
+ free_page((unsigned long) new_node);
+
+ return &n->data[offset];
+}
+EXPORT_SYMBOL(__genradix_ptr_alloc);
+
+void *__genradix_iter_peek(struct genradix_iter *iter,
+ struct __genradix *radix,
+ size_t objs_per_page)
+{
+ struct genradix_root *r;
+ struct genradix_node *n;
+ unsigned level, i;
+restart:
+ r = READ_ONCE(radix->root);
+ if (!r)
+ return NULL;
+
+ n = genradix_root_to_node(r);
+ level = genradix_root_to_depth(r);
+
+ if (ilog2(iter->offset) >= genradix_depth_shift(level))
+ return NULL;
+
+ while (level) {
+ level--;
+
+ i = (iter->offset >> genradix_depth_shift(level)) &
+ (GENRADIX_ARY - 1);
+
+ while (!n->children[i]) {
+ i++;
+ iter->offset = round_down(iter->offset +
+ genradix_depth_size(level),
+ genradix_depth_size(level));
+ iter->pos = (iter->offset >> PAGE_SHIFT) *
+ objs_per_page;
+ if (i == GENRADIX_ARY)
+ goto restart;
+ }
+
+ n = n->children[i];
+ }
+
+ return &n->data[iter->offset & (PAGE_SIZE - 1)];
+}
+EXPORT_SYMBOL(__genradix_iter_peek);
+
+static void genradix_free_recurse(struct genradix_node *n, unsigned level)
+{
+ if (level) {
+ unsigned i;
+
+ for (i = 0; i < GENRADIX_ARY; i++)
+ if (n->children[i])
+ genradix_free_recurse(n->children[i], level - 1);
+ }
+
+ free_page((unsigned long) n);
+}
+
+int __genradix_prealloc(struct __genradix *radix, size_t size,
+ gfp_t gfp_mask)
+{
+ size_t offset;
+
+ for (offset = 0; offset < size; offset += PAGE_SIZE)
+ if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(__genradix_prealloc);
+
+void __genradix_free(struct __genradix *radix)
+{
+ struct genradix_root *r = xchg(&radix->root, NULL);
+
+ genradix_free_recurse(genradix_root_to_node(r),
+ genradix_root_to_depth(r));
+}
+EXPORT_SYMBOL(__genradix_free);
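This new file is the out-of-line backend for a simple radix tree of fixed-size objects: leaves are whole pages of object data, interior nodes are pages of child pointers, and the tree grows in depth on demand with the depth encoded in the low bits of the root pointer. The type-safe front end lives in include/linux/generic-radix-tree.h; a hedged usage sketch, assuming the macro names from that header:

        struct foo {
                int bar;
        };

        GENRADIX(struct foo) foos;
        struct foo *p;

        genradix_init(&foos);

        p = genradix_ptr_alloc(&foos, 1000, GFP_KERNEL); /* allocate slot 1000 */
        if (p)
                p->bar = 42;

        p = genradix_ptr(&foos, 1000);  /* plain lookup, no allocation */

        genradix_free(&foos);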
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index 14436f4ca6bd..30e0f9770f88 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x)
if (x <= ULONG_MAX)
return int_sqrt((unsigned long) x);
- m = 1ULL << (fls64(x) & ~1ULL);
+ m = 1ULL << ((fls64(x) - 1) & ~1ULL);
while (m != 0) {
b = y + m;
y >>= 1;
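The old seed m = 1ULL << (fls64(x) & ~1ULL) goes wrong once the top bit of x is set: fls64(x) is then 64 and shifting a 64-bit value by 64 is undefined, so int_sqrt64() could return nonsense for inputs at or above 2^63. Subtracting one first picks the largest even bit position at or below the most significant set bit, for example:

        /* Worked example (illustrative): x = 1ULL << 63
         *   old: fls64(x) = 64, shift = 64 & ~1 = 64 -> 1ULL << 64 is undefined
         *   new: shift = (64 - 1) & ~1 = 62          -> m = 1ULL << 62, m <= x
         */
        u64 x = 1ULL << 63;
        u64 m = 1ULL << ((fls64(x) - 1) & ~1ULL);       /* == 1ULL << 62 */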
diff --git a/lib/iomap.c b/lib/iomap.c
index 541d926da95e..e909ab71e995 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -65,8 +65,9 @@ static void bad_io_access(unsigned long port, const char *access)
#endif
#ifndef mmio_read16be
-#define mmio_read16be(addr) be16_to_cpu(__raw_readw(addr))
-#define mmio_read32be(addr) be32_to_cpu(__raw_readl(addr))
+#define mmio_read16be(addr) swab16(readw(addr))
+#define mmio_read32be(addr) swab32(readl(addr))
+#define mmio_read64be(addr) swab64(readq(addr))
#endif
unsigned int ioread8(void __iomem *addr)
@@ -100,14 +101,89 @@ EXPORT_SYMBOL(ioread16be);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
+#ifdef readq
+static u64 pio_read64_lo_hi(unsigned long port)
+{
+ u64 lo, hi;
+
+ lo = inl(port);
+ hi = inl(port + sizeof(u32));
+
+ return lo | (hi << 32);
+}
+
+static u64 pio_read64_hi_lo(unsigned long port)
+{
+ u64 lo, hi;
+
+ hi = inl(port + sizeof(u32));
+ lo = inl(port);
+
+ return lo | (hi << 32);
+}
+
+static u64 pio_read64be_lo_hi(unsigned long port)
+{
+ u64 lo, hi;
+
+ lo = pio_read32be(port + sizeof(u32));
+ hi = pio_read32be(port);
+
+ return lo | (hi << 32);
+}
+
+static u64 pio_read64be_hi_lo(unsigned long port)
+{
+ u64 lo, hi;
+
+ hi = pio_read32be(port);
+ lo = pio_read32be(port + sizeof(u32));
+
+ return lo | (hi << 32);
+}
+
+u64 ioread64_lo_hi(void __iomem *addr)
+{
+ IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr));
+ return 0xffffffffffffffffULL;
+}
+
+u64 ioread64_hi_lo(void __iomem *addr)
+{
+ IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr));
+ return 0xffffffffffffffffULL;
+}
+
+u64 ioread64be_lo_hi(void __iomem *addr)
+{
+ IO_COND(addr, return pio_read64be_lo_hi(port),
+ return mmio_read64be(addr));
+ return 0xffffffffffffffffULL;
+}
+
+u64 ioread64be_hi_lo(void __iomem *addr)
+{
+ IO_COND(addr, return pio_read64be_hi_lo(port),
+ return mmio_read64be(addr));
+ return 0xffffffffffffffffULL;
+}
+
+EXPORT_SYMBOL(ioread64_lo_hi);
+EXPORT_SYMBOL(ioread64_hi_lo);
+EXPORT_SYMBOL(ioread64be_lo_hi);
+EXPORT_SYMBOL(ioread64be_hi_lo);
+
+#endif /* readq */
+
#ifndef pio_write16be
#define pio_write16be(val,port) outw(swab16(val),port)
#define pio_write32be(val,port) outl(swab32(val),port)
#endif
#ifndef mmio_write16be
-#define mmio_write16be(val,port) __raw_writew(be16_to_cpu(val),port)
-#define mmio_write32be(val,port) __raw_writel(be32_to_cpu(val),port)
+#define mmio_write16be(val,port) writew(swab16(val),port)
+#define mmio_write32be(val,port) writel(swab32(val),port)
+#define mmio_write64be(val,port) writeq(swab64(val),port)
#endif
void iowrite8(u8 val, void __iomem *addr)
@@ -136,6 +212,62 @@ EXPORT_SYMBOL(iowrite16be);
EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
+#ifdef writeq
+static void pio_write64_lo_hi(u64 val, unsigned long port)
+{
+ outl(val, port);
+ outl(val >> 32, port + sizeof(u32));
+}
+
+static void pio_write64_hi_lo(u64 val, unsigned long port)
+{
+ outl(val >> 32, port + sizeof(u32));
+ outl(val, port);
+}
+
+static void pio_write64be_lo_hi(u64 val, unsigned long port)
+{
+ pio_write32be(val, port + sizeof(u32));
+ pio_write32be(val >> 32, port);
+}
+
+static void pio_write64be_hi_lo(u64 val, unsigned long port)
+{
+ pio_write32be(val >> 32, port);
+ pio_write32be(val, port + sizeof(u32));
+}
+
+void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+ IO_COND(addr, pio_write64_lo_hi(val, port),
+ writeq(val, addr));
+}
+
+void iowrite64_hi_lo(u64 val, void __iomem *addr)
+{
+ IO_COND(addr, pio_write64_hi_lo(val, port),
+ writeq(val, addr));
+}
+
+void iowrite64be_lo_hi(u64 val, void __iomem *addr)
+{
+ IO_COND(addr, pio_write64be_lo_hi(val, port),
+ mmio_write64be(val, addr));
+}
+
+void iowrite64be_hi_lo(u64 val, void __iomem *addr)
+{
+ IO_COND(addr, pio_write64be_hi_lo(val, port),
+ mmio_write64be(val, addr));
+}
+
+EXPORT_SYMBOL(iowrite64_lo_hi);
+EXPORT_SYMBOL(iowrite64_hi_lo);
+EXPORT_SYMBOL(iowrite64be_lo_hi);
+EXPORT_SYMBOL(iowrite64be_hi_lo);
+
+#endif /* writeq */
+
/*
* These are the "repeat MMIO read/write" functions.
* Note the "__raw" accesses, since we don't want to
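The big-endian MMIO helpers switch from the __raw accessors to the ordered readw()/readl()/writew()/writel() family plus swab, and both the PIO and MMIO paths gain 64-bit variants. The _lo_hi/_hi_lo suffixes matter on the PIO path, where the access has to be split into two 32-bit port operations: they state which half goes first, so a driver can match whatever order its device latches on, while the MMIO path uses a single readq()/writeq(). A hedged usage sketch; the register name is invented for illustration.

        /* Hypothetical device that latches a 64-bit DMA address when the
         * high dword is written last: */
        iowrite64_lo_hi(dma_addr, base + HYP_REG_DMA_ADDR);

        /* On a PIO mapping this amounts to roughly:
         *      outl(dma_addr, port);                      low half first
         *      outl(dma_addr >> 32, port + sizeof(u32));  then the high half
         */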
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 517f5853ffed..063213685563 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -76,83 +76,123 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
return 0;
}
+static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr,
+ pgprot_t prot)
+{
+ if (!ioremap_pmd_enabled())
+ return 0;
+
+ if ((end - addr) != PMD_SIZE)
+ return 0;
+
+ if (!IS_ALIGNED(phys_addr, PMD_SIZE))
+ return 0;
+
+ if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
+ return 0;
+
+ return pmd_set_huge(pmd, phys_addr, prot);
+}
+
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
pmd_t *pmd;
unsigned long next;
- phys_addr -= addr;
pmd = pmd_alloc(&init_mm, pud, addr);
if (!pmd)
return -ENOMEM;
do {
next = pmd_addr_end(addr, end);
- if (ioremap_pmd_enabled() &&
- ((next - addr) == PMD_SIZE) &&
- IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
- pmd_free_pte_page(pmd, addr)) {
- if (pmd_set_huge(pmd, phys_addr + addr, prot))
- continue;
- }
+ if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot))
+ continue;
- if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
+ if (ioremap_pte_range(pmd, addr, next, phys_addr, prot))
return -ENOMEM;
- } while (pmd++, addr = next, addr != end);
+ } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
return 0;
}
+static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr,
+ pgprot_t prot)
+{
+ if (!ioremap_pud_enabled())
+ return 0;
+
+ if ((end - addr) != PUD_SIZE)
+ return 0;
+
+ if (!IS_ALIGNED(phys_addr, PUD_SIZE))
+ return 0;
+
+ if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
+ return 0;
+
+ return pud_set_huge(pud, phys_addr, prot);
+}
+
static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
pud_t *pud;
unsigned long next;
- phys_addr -= addr;
pud = pud_alloc(&init_mm, p4d, addr);
if (!pud)
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
- if (ioremap_pud_enabled() &&
- ((next - addr) == PUD_SIZE) &&
- IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
- pud_free_pmd_page(pud, addr)) {
- if (pud_set_huge(pud, phys_addr + addr, prot))
- continue;
- }
+ if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot))
+ continue;
- if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
+ if (ioremap_pmd_range(pud, addr, next, phys_addr, prot))
return -ENOMEM;
- } while (pud++, addr = next, addr != end);
+ } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
return 0;
}
+static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
+ unsigned long end, phys_addr_t phys_addr,
+ pgprot_t prot)
+{
+ if (!ioremap_p4d_enabled())
+ return 0;
+
+ if ((end - addr) != P4D_SIZE)
+ return 0;
+
+ if (!IS_ALIGNED(phys_addr, P4D_SIZE))
+ return 0;
+
+ if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
+ return 0;
+
+ return p4d_set_huge(p4d, phys_addr, prot);
+}
+
static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
p4d_t *p4d;
unsigned long next;
- phys_addr -= addr;
p4d = p4d_alloc(&init_mm, pgd, addr);
if (!p4d)
return -ENOMEM;
do {
next = p4d_addr_end(addr, end);
- if (ioremap_p4d_enabled() &&
- ((next - addr) == P4D_SIZE) &&
- IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
- if (p4d_set_huge(p4d, phys_addr + addr, prot))
- continue;
- }
+ if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot))
+ continue;
- if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot))
+ if (ioremap_pud_range(p4d, addr, next, phys_addr, prot))
return -ENOMEM;
- } while (p4d++, addr = next, addr != end);
+ } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
return 0;
}
@@ -168,14 +208,13 @@ int ioremap_page_range(unsigned long addr,
BUG_ON(addr >= end);
start = addr;
- phys_addr -= addr;
pgd = pgd_offset_k(addr);
do {
next = pgd_addr_end(addr, end);
- err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot);
+ err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot);
if (err)
break;
- } while (pgd++, addr = next, addr != end);
+ } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
flush_cache_vmap(start, end);
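
The reworked walkers above stop carrying a pre-subtracted (phys_addr - addr) offset; each level now advances the physical address by (next - addr) on every iteration and delegates the huge-mapping eligibility checks to ioremap_try_huge_{pmd,pud,p4d}(). Below is a minimal userspace sketch of that loop shape only; FAKE_PMD_SIZE and fake_pmd_addr_end() are made-up stand-ins, not the kernel page-table API.

/* Sketch: walk [addr, end) in PMD-sized steps while advancing the
 * physical address in lockstep, as the reworked walkers do. */
#include <stdio.h>

#define FAKE_PMD_SIZE 0x200000UL	/* assumed 2 MiB step, illustration only */

static unsigned long fake_pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long next = (addr + FAKE_PMD_SIZE) & ~(FAKE_PMD_SIZE - 1);

	return next < end ? next : end;
}

int main(void)
{
	unsigned long addr = 0x1000000UL;
	unsigned long end  = 0x1600000UL;
	unsigned long phys = 0x80000000UL;
	unsigned long next;

	do {
		next = fake_pmd_addr_end(addr, end);
		/* a real walker would map [addr, next) to phys here */
		printf("map virt %#lx..%#lx -> phys %#lx\n", addr, next, phys);
	} while (phys += (next - addr), addr = next, addr != end);

	return 0;
}

The comma expression in the while condition mirrors the kernel loops: the physical address is bumped by the size of the range just handled before addr is overwritten with next.
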
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 54c248526b55..b396d328a764 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -6,6 +6,7 @@
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>
+#include <linux/scatterlist.h>
#define PIPE_PARANOIA /* for now */
@@ -135,7 +136,7 @@
static int copyout(void __user *to, const void *from, size_t n)
{
- if (access_ok(VERIFY_WRITE, to, n)) {
+ if (access_ok(to, n)) {
kasan_check_read(from, n);
n = raw_copy_to_user(to, from, n);
}
@@ -144,7 +145,7 @@ static int copyout(void __user *to, const void *from, size_t n)
static int copyin(void *to, const void __user *from, size_t n)
{
- if (access_ok(VERIFY_READ, from, n)) {
+ if (access_ok(from, n)) {
kasan_check_write(to, n);
n = raw_copy_from_user(to, from, n);
}
@@ -560,13 +561,20 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
return bytes;
}
+static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
+ __wsum sum, size_t off)
+{
+ __wsum next = csum_partial_copy_nocheck(from, to, len, 0);
+ return csum_block_add(sum, next, off);
+}
+
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
__wsum *csum, struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
size_t n, r;
size_t off = 0;
- __wsum sum = *csum, next;
+ __wsum sum = *csum;
int idx;
if (!sanity(i))
@@ -578,8 +586,7 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
for ( ; n; idx = next_idx(idx, pipe), r = 0) {
size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
char *p = kmap_atomic(pipe->bufs[idx].page);
- next = csum_partial_copy_nocheck(addr, p + r, chunk, 0);
- sum = csum_block_add(sum, next, off);
+ sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
kunmap_atomic(p);
i->idx = idx;
i->iov_offset = r + chunk;
@@ -613,7 +620,7 @@ EXPORT_SYMBOL(_copy_to_iter);
#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
static int copyout_mcsafe(void __user *to, const void *from, size_t n)
{
- if (access_ok(VERIFY_WRITE, to, n)) {
+ if (access_ok(to, n)) {
kasan_check_read(from, n);
n = copy_to_user_mcsafe((__force void *) to, from, n);
}
@@ -854,8 +861,21 @@ EXPORT_SYMBOL(_copy_from_iter_full_nocache);
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
- struct page *head = compound_head(page);
- size_t v = n + offset + page_address(page) - page_address(head);
+ struct page *head;
+ size_t v = n + offset;
+
+ /*
+ * The general case needs to access the page order in order
+ * to compute the page size.
+ * However, we mostly deal with order-0 pages and thus can
+ * avoid a possible cache line miss for requests that fit all
+ * page orders.
+ */
+ if (n <= v && v <= PAGE_SIZE)
+ return true;
+
+ head = compound_head(page);
+ v += (page - head) << PAGE_SHIFT;
if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
return true;
@@ -1400,17 +1420,15 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
err ? v.iov_len : 0;
}), ({
char *p = kmap_atomic(v.bv_page);
- next = csum_partial_copy_nocheck(p + v.bv_offset,
- (to += v.bv_len) - v.bv_len,
- v.bv_len, 0);
+ sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
+ p + v.bv_offset, v.bv_len,
+ sum, off);
kunmap_atomic(p);
- sum = csum_block_add(sum, next, off);
off += v.bv_len;
}),({
- next = csum_partial_copy_nocheck(v.iov_base,
- (to += v.iov_len) - v.iov_len,
- v.iov_len, 0);
- sum = csum_block_add(sum, next, off);
+ sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
+ v.iov_base, v.iov_len,
+ sum, off);
off += v.iov_len;
})
)
@@ -1444,17 +1462,15 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
0;
}), ({
char *p = kmap_atomic(v.bv_page);
- next = csum_partial_copy_nocheck(p + v.bv_offset,
- (to += v.bv_len) - v.bv_len,
- v.bv_len, 0);
+ sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
+ p + v.bv_offset, v.bv_len,
+ sum, off);
kunmap_atomic(p);
- sum = csum_block_add(sum, next, off);
off += v.bv_len;
}),({
- next = csum_partial_copy_nocheck(v.iov_base,
- (to += v.iov_len) - v.iov_len,
- v.iov_len, 0);
- sum = csum_block_add(sum, next, off);
+ sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
+ v.iov_base, v.iov_len,
+ sum, off);
off += v.iov_len;
})
)
@@ -1464,10 +1480,11 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
struct iov_iter *i)
{
const char *from = addr;
+ __wsum *csum = csump;
__wsum sum, next;
size_t off = 0;
@@ -1491,17 +1508,15 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
err ? v.iov_len : 0;
}), ({
char *p = kmap_atomic(v.bv_page);
- next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
- p + v.bv_offset,
- v.bv_len, 0);
+ sum = csum_and_memcpy(p + v.bv_offset,
+ (from += v.bv_len) - v.bv_len,
+ v.bv_len, sum, off);
kunmap_atomic(p);
- sum = csum_block_add(sum, next, off);
off += v.bv_len;
}),({
- next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
- v.iov_base,
- v.iov_len, 0);
- sum = csum_block_add(sum, next, off);
+ sum = csum_and_memcpy(v.iov_base,
+ (from += v.iov_len) - v.iov_len,
+ v.iov_len, sum, off);
off += v.iov_len;
})
)
@@ -1510,6 +1525,25 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
+size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
+ struct iov_iter *i)
+{
+#ifdef CONFIG_CRYPTO
+ struct ahash_request *hash = hashp;
+ struct scatterlist sg;
+ size_t copied;
+
+ copied = copy_to_iter(addr, bytes, i);
+ sg_init_one(&sg, addr, copied);
+ ahash_request_set_crypt(hash, &sg, NULL, copied);
+ crypto_ahash_update(hash);
+ return copied;
+#else
+ return 0;
+#endif
+}
+EXPORT_SYMBOL(hash_and_copy_to_iter);
+
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
size_t size = i->count;
@@ -1646,7 +1680,7 @@ int import_single_range(int rw, void __user *buf, size_t len,
{
if (len > MAX_RW_COUNT)
len = MAX_RW_COUNT;
- if (unlikely(!access_ok(!rw, buf, len)))
+ if (unlikely(!access_ok(buf, len)))
return -EFAULT;
iov->iov_base = buf;
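
The iov_iter changes above fold the repeated csum_partial_copy_nocheck() plus csum_block_add() pairs into one csum_and_memcpy() helper shared by the pipe, bvec and iovec paths. The toy userspace sketch below shows the same copy-and-fold-at-offset pattern; toy_csum() and toy_csum_and_memcpy() are invented stand-ins, and a plain byte sum replaces the real checksum (a real csum_block_add() also has to rotate sums that start at an odd offset).

/* Sketch: copy a buffer in chunks while folding each chunk's checksum
 * into a running sum, one helper call per chunk. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t toy_csum(const void *buf, size_t len)
{
	const unsigned char *p = buf;
	uint32_t sum = 0;

	while (len--)
		sum += *p++;
	return sum;
}

static uint32_t toy_csum_and_memcpy(void *to, const void *from, size_t len,
				    uint32_t sum, size_t off)
{
	memcpy(to, from, len);
	(void)off;	/* the real helper folds at this byte offset */
	return sum + toy_csum(from, len);
}

int main(void)
{
	const char src[] = "copy me in two chunks";
	char dst[sizeof(src)] = { 0 };
	size_t half = sizeof(src) / 2;
	uint32_t sum = 0;

	sum = toy_csum_and_memcpy(dst, src, half, sum, 0);
	sum = toy_csum_and_memcpy(dst + half, src + half,
				  sizeof(src) - half, sum, half);
	printf("copied \"%s\", toy checksum %u\n", dst, sum);
	return 0;
}
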
diff --git a/lib/irq_poll.c b/lib/irq_poll.c
index 86a709954f5a..2f17b488d58e 100644
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -35,7 +35,7 @@ void irq_poll_sched(struct irq_poll *iop)
local_irq_save(flags);
list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
- __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_sched);
diff --git a/lib/kobject.c b/lib/kobject.c
index 97d86dc17c42..aa89edcd2b63 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -639,7 +639,7 @@ static void kobject_cleanup(struct kobject *kobj)
kobject_name(kobj), kobj, __func__, kobj->parent);
if (t && !t->release)
- pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed.\n",
+ pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/kobject.txt.\n",
kobject_name(kobj), kobj);
/* send "remove" if the caller did not do it but sent "add" */
@@ -887,7 +887,7 @@ static void kset_release(struct kobject *kobj)
kfree(kset);
}
-void kset_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
+static void kset_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
if (kobj->parent)
kobject_get_ownership(kobj->parent, uid, gid);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 63d0816ab23b..f05802687ba4 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -200,7 +200,7 @@ int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count)
r = kobject_action_type(buf, count, &action, &action_args);
if (r) {
- msg = "unknown uevent action string\n";
+ msg = "unknown uevent action string";
goto out;
}
@@ -212,7 +212,7 @@ int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count)
r = kobject_action_args(action_args,
count - (action_args - buf), &env);
if (r == -EINVAL) {
- msg = "incorrect uevent action arguments\n";
+ msg = "incorrect uevent action arguments";
goto out;
}
@@ -224,7 +224,7 @@ int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count)
out:
if (r) {
devpath = kobject_get_path(kobj, GFP_KERNEL);
- printk(KERN_WARNING "synth uevent: %s: %s",
+ pr_warn("synth uevent: %s: %s\n",
devpath ?: "unknown device",
msg ?: "failed to send uevent");
kfree(devpath);
@@ -240,6 +240,7 @@ static int kobj_usermode_filter(struct kobject *kobj)
ops = kobj_ns_ops(kobj);
if (ops) {
const void *init_ns, *ns;
+
ns = kobj->ktype->namespace(kobj);
init_ns = ops->initial_ns();
return ns != init_ns;
@@ -390,6 +391,7 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj,
ops = kobj_ns_ops(kobj);
if (!ops && kobj->kset) {
struct kobject *ksobj = &kobj->kset->kobj;
+
if (ksobj->parent != NULL)
ops = kobj_ns_ops(ksobj->parent);
}
@@ -579,7 +581,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
mutex_lock(&uevent_sock_mutex);
/* we will send an event, so request a new sequence number */
- retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum);
+ retval = add_uevent_var(env, "SEQNUM=%llu", ++uevent_seqnum);
if (retval) {
mutex_unlock(&uevent_sock_mutex);
goto exit;
@@ -763,8 +765,7 @@ static int uevent_net_init(struct net *net)
ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
if (!ue_sk->sk) {
- printk(KERN_ERR
- "kobject_uevent: unable to create netlink socket!\n");
+ pr_err("kobject_uevent: unable to create netlink socket!\n");
kfree(ue_sk);
return -ENODEV;
}
diff --git a/lib/livepatch/Makefile b/lib/livepatch/Makefile
new file mode 100644
index 000000000000..26900ddaef82
--- /dev/null
+++ b/lib/livepatch/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for livepatch test code.
+
+obj-$(CONFIG_TEST_LIVEPATCH) += test_klp_atomic_replace.o \
+ test_klp_callbacks_demo.o \
+ test_klp_callbacks_demo2.o \
+ test_klp_callbacks_busy.o \
+ test_klp_callbacks_mod.o \
+ test_klp_livepatch.o \
+ test_klp_shadow_vars.o
+
+# Target modules to be livepatched require CC_FLAGS_FTRACE
+CFLAGS_test_klp_callbacks_busy.o += $(CC_FLAGS_FTRACE)
+CFLAGS_test_klp_callbacks_mod.o += $(CC_FLAGS_FTRACE)
diff --git a/lib/livepatch/test_klp_atomic_replace.c b/lib/livepatch/test_klp_atomic_replace.c
new file mode 100644
index 000000000000..5af7093ca00c
--- /dev/null
+++ b/lib/livepatch/test_klp_atomic_replace.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/livepatch.h>
+
+static int replace;
+module_param(replace, int, 0644);
+MODULE_PARM_DESC(replace, "replace (default=0)");
+
+#include <linux/seq_file.h>
+static int livepatch_meminfo_proc_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "%s: %s\n", THIS_MODULE->name,
+ "this has been live patched");
+ return 0;
+}
+
+static struct klp_func funcs[] = {
+ {
+ .old_name = "meminfo_proc_show",
+ .new_func = livepatch_meminfo_proc_show,
+ }, {}
+};
+
+static struct klp_object objs[] = {
+ {
+ /* name being NULL means vmlinux */
+ .funcs = funcs,
+ }, {}
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+ /* set .replace in the init function below for demo purposes */
+};
+
+static int test_klp_atomic_replace_init(void)
+{
+ patch.replace = replace;
+ return klp_enable_patch(&patch);
+}
+
+static void test_klp_atomic_replace_exit(void)
+{
+}
+
+module_init(test_klp_atomic_replace_init);
+module_exit(test_klp_atomic_replace_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
+MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
+MODULE_DESCRIPTION("Livepatch test: atomic replace");
diff --git a/lib/livepatch/test_klp_callbacks_busy.c b/lib/livepatch/test_klp_callbacks_busy.c
new file mode 100644
index 000000000000..40beddf8a0e2
--- /dev/null
+++ b/lib/livepatch/test_klp_callbacks_busy.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+
+static int sleep_secs;
+module_param(sleep_secs, int, 0644);
+MODULE_PARM_DESC(sleep_secs, "sleep_secs (default=0)");
+
+static void busymod_work_func(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work, busymod_work_func);
+
+static void busymod_work_func(struct work_struct *work)
+{
+ pr_info("%s, sleeping %d seconds ...\n", __func__, sleep_secs);
+ msleep(sleep_secs * 1000);
+ pr_info("%s exit\n", __func__);
+}
+
+static int test_klp_callbacks_busy_init(void)
+{
+ pr_info("%s\n", __func__);
+ schedule_delayed_work(&work,
+ msecs_to_jiffies(1000 * 0));
+ return 0;
+}
+
+static void test_klp_callbacks_busy_exit(void)
+{
+ cancel_delayed_work_sync(&work);
+ pr_info("%s\n", __func__);
+}
+
+module_init(test_klp_callbacks_busy_init);
+module_exit(test_klp_callbacks_busy_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
+MODULE_DESCRIPTION("Livepatch test: busy target module");
diff --git a/lib/livepatch/test_klp_callbacks_demo.c b/lib/livepatch/test_klp_callbacks_demo.c
new file mode 100644
index 000000000000..3fd8fe1cd1cc
--- /dev/null
+++ b/lib/livepatch/test_klp_callbacks_demo.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/livepatch.h>
+
+static int pre_patch_ret;
+module_param(pre_patch_ret, int, 0644);
+MODULE_PARM_DESC(pre_patch_ret, "pre_patch_ret (default=0)");
+
+static const char *const module_state[] = {
+ [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
+ [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
+ [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
+ [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
+};
+
+static void callback_info(const char *callback, struct klp_object *obj)
+{
+ if (obj->mod)
+ pr_info("%s: %s -> %s\n", callback, obj->mod->name,
+ module_state[obj->mod->state]);
+ else
+ pr_info("%s: vmlinux\n", callback);
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static int pre_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ return pre_patch_ret;
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static void post_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void pre_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void post_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+static void patched_work_func(struct work_struct *work)
+{
+ pr_info("%s\n", __func__);
+}
+
+static struct klp_func no_funcs[] = {
+ {}
+};
+
+static struct klp_func busymod_funcs[] = {
+ {
+ .old_name = "busymod_work_func",
+ .new_func = patched_work_func,
+ }, {}
+};
+
+static struct klp_object objs[] = {
+ {
+ .name = NULL, /* vmlinux */
+ .funcs = no_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, {
+ .name = "test_klp_callbacks_mod",
+ .funcs = no_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, {
+ .name = "test_klp_callbacks_busy",
+ .funcs = busymod_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+};
+
+static int test_klp_callbacks_demo_init(void)
+{
+ return klp_enable_patch(&patch);
+}
+
+static void test_klp_callbacks_demo_exit(void)
+{
+}
+
+module_init(test_klp_callbacks_demo_init);
+module_exit(test_klp_callbacks_demo_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
+MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
+MODULE_DESCRIPTION("Livepatch test: livepatch demo");
diff --git a/lib/livepatch/test_klp_callbacks_demo2.c b/lib/livepatch/test_klp_callbacks_demo2.c
new file mode 100644
index 000000000000..5417573e80af
--- /dev/null
+++ b/lib/livepatch/test_klp_callbacks_demo2.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/livepatch.h>
+
+static int replace;
+module_param(replace, int, 0644);
+MODULE_PARM_DESC(replace, "replace (default=0)");
+
+static const char *const module_state[] = {
+ [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
+ [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
+ [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
+ [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
+};
+
+static void callback_info(const char *callback, struct klp_object *obj)
+{
+ if (obj->mod)
+ pr_info("%s: %s -> %s\n", callback, obj->mod->name,
+ module_state[obj->mod->state]);
+ else
+ pr_info("%s: vmlinux\n", callback);
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static int pre_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+ return 0;
+}
+
+/* Executed on object patching (ie, patch enablement) */
+static void post_patch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void pre_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+/* Executed on object unpatching (ie, patch disablement) */
+static void post_unpatch_callback(struct klp_object *obj)
+{
+ callback_info(__func__, obj);
+}
+
+static struct klp_func no_funcs[] = {
+ { }
+};
+
+static struct klp_object objs[] = {
+ {
+ .name = NULL, /* vmlinux */
+ .funcs = no_funcs,
+ .callbacks = {
+ .pre_patch = pre_patch_callback,
+ .post_patch = post_patch_callback,
+ .pre_unpatch = pre_unpatch_callback,
+ .post_unpatch = post_unpatch_callback,
+ },
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+ /* set .replace in the init function below for demo purposes */
+};
+
+static int test_klp_callbacks_demo2_init(void)
+{
+ patch.replace = replace;
+ return klp_enable_patch(&patch);
+}
+
+static void test_klp_callbacks_demo2_exit(void)
+{
+}
+
+module_init(test_klp_callbacks_demo2_init);
+module_exit(test_klp_callbacks_demo2_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
+MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
+MODULE_DESCRIPTION("Livepatch test: livepatch demo2");
diff --git a/lib/livepatch/test_klp_callbacks_mod.c b/lib/livepatch/test_klp_callbacks_mod.c
new file mode 100644
index 000000000000..8fbe645b1c2c
--- /dev/null
+++ b/lib/livepatch/test_klp_callbacks_mod.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+static int test_klp_callbacks_mod_init(void)
+{
+ pr_info("%s\n", __func__);
+ return 0;
+}
+
+static void test_klp_callbacks_mod_exit(void)
+{
+ pr_info("%s\n", __func__);
+}
+
+module_init(test_klp_callbacks_mod_init);
+module_exit(test_klp_callbacks_mod_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
+MODULE_DESCRIPTION("Livepatch test: target module");
diff --git a/lib/livepatch/test_klp_livepatch.c b/lib/livepatch/test_klp_livepatch.c
new file mode 100644
index 000000000000..aff08199de71
--- /dev/null
+++ b/lib/livepatch/test_klp_livepatch.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/livepatch.h>
+
+#include <linux/seq_file.h>
+static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "%s: %s\n", THIS_MODULE->name,
+ "this has been live patched");
+ return 0;
+}
+
+static struct klp_func funcs[] = {
+ {
+ .old_name = "cmdline_proc_show",
+ .new_func = livepatch_cmdline_proc_show,
+ }, { }
+};
+
+static struct klp_object objs[] = {
+ {
+ /* name being NULL means vmlinux */
+ .funcs = funcs,
+ }, { }
+};
+
+static struct klp_patch patch = {
+ .mod = THIS_MODULE,
+ .objs = objs,
+};
+
+static int test_klp_livepatch_init(void)
+{
+ return klp_enable_patch(&patch);
+}
+
+static void test_klp_livepatch_exit(void)
+{
+}
+
+module_init(test_klp_livepatch_init);
+module_exit(test_klp_livepatch_exit);
+MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");
+MODULE_AUTHOR("Seth Jennings <sjenning@redhat.com>");
+MODULE_DESCRIPTION("Livepatch test: livepatch module");
diff --git a/lib/livepatch/test_klp_shadow_vars.c b/lib/livepatch/test_klp_shadow_vars.c
new file mode 100644
index 000000000000..fe5c413efe96
--- /dev/null
+++ b/lib/livepatch/test_klp_shadow_vars.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/livepatch.h>
+#include <linux/slab.h>
+
+/*
+ * Keep a small list of pointers so that we can print address-agnostic
+ * pointer values. Use a rolling integer count to differentiate the values.
+ * Ironically we could have used the shadow variable API to do this, but
+ * let's not lean too heavily on the very code we're testing.
+ */
+static LIST_HEAD(ptr_list);
+struct shadow_ptr {
+ void *ptr;
+ int id;
+ struct list_head list;
+};
+
+static void free_ptr_list(void)
+{
+ struct shadow_ptr *sp, *tmp_sp;
+
+ list_for_each_entry_safe(sp, tmp_sp, &ptr_list, list) {
+ list_del(&sp->list);
+ kfree(sp);
+ }
+}
+
+static int ptr_id(void *ptr)
+{
+ struct shadow_ptr *sp;
+ static int count;
+
+ list_for_each_entry(sp, &ptr_list, list) {
+ if (sp->ptr == ptr)
+ return sp->id;
+ }
+
+ sp = kmalloc(sizeof(*sp), GFP_ATOMIC);
+ if (!sp)
+ return -ENOMEM;
+ sp->ptr = ptr;
+ sp->id = count++;
+
+ list_add(&sp->list, &ptr_list);
+
+ return sp->id;
+}
+
+/*
+ * Shadow variable wrapper functions that echo the function and arguments
+ * to the kernel log for testing verification. Don't display raw pointers,
+ * but use the ptr_id() value instead.
+ */
+static void *shadow_get(void *obj, unsigned long id)
+{
+ void *ret = klp_shadow_get(obj, id);
+
+ pr_info("klp_%s(obj=PTR%d, id=0x%lx) = PTR%d\n",
+ __func__, ptr_id(obj), id, ptr_id(ret));
+
+ return ret;
+}
+
+static void *shadow_alloc(void *obj, unsigned long id, size_t size,
+ gfp_t gfp_flags, klp_shadow_ctor_t ctor,
+ void *ctor_data)
+{
+ void *ret = klp_shadow_alloc(obj, id, size, gfp_flags, ctor,
+ ctor_data);
+ pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
+ __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
+ ptr_id(ctor_data), ptr_id(ret));
+ return ret;
+}
+
+static void *shadow_get_or_alloc(void *obj, unsigned long id, size_t size,
+ gfp_t gfp_flags, klp_shadow_ctor_t ctor,
+ void *ctor_data)
+{
+ void *ret = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor,
+ ctor_data);
+ pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
+ __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
+ ptr_id(ctor_data), ptr_id(ret));
+ return ret;
+}
+
+static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
+{
+ klp_shadow_free(obj, id, dtor);
+ pr_info("klp_%s(obj=PTR%d, id=0x%lx, dtor=PTR%d)\n",
+ __func__, ptr_id(obj), id, ptr_id(dtor));
+}
+
+static void shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
+{
+ klp_shadow_free_all(id, dtor);
+ pr_info("klp_%s(id=0x%lx, dtor=PTR%d)\n",
+ __func__, id, ptr_id(dtor));
+}
+
+
+/* Shadow variable constructor - remember simple pointer data */
+static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
+{
+ int **shadow_int = shadow_data;
+ *shadow_int = ctor_data;
+ pr_info("%s: PTR%d -> PTR%d\n",
+ __func__, ptr_id(shadow_int), ptr_id(ctor_data));
+
+ return 0;
+}
+
+static void shadow_dtor(void *obj, void *shadow_data)
+{
+ pr_info("%s(obj=PTR%d, shadow_data=PTR%d)\n",
+ __func__, ptr_id(obj), ptr_id(shadow_data));
+}
+
+static int test_klp_shadow_vars_init(void)
+{
+ void *obj = THIS_MODULE;
+ int id = 0x1234;
+ size_t size = sizeof(int *);
+ gfp_t gfp_flags = GFP_KERNEL;
+
+ int var1, var2, var3, var4;
+ int **sv1, **sv2, **sv3, **sv4;
+
+ void *ret;
+
+ ptr_id(NULL);
+ ptr_id(&var1);
+ ptr_id(&var2);
+ ptr_id(&var3);
+ ptr_id(&var4);
+
+ /*
+ * With an empty shadow variable hash table, expect not to find
+ * any matches.
+ */
+ ret = shadow_get(obj, id);
+ if (!ret)
+ pr_info(" got expected NULL result\n");
+
+ /*
+ * Allocate a few shadow variables with different <obj> and <id>.
+ */
+ sv1 = shadow_alloc(obj, id, size, gfp_flags, shadow_ctor, &var1);
+ if (!sv1)
+ return -ENOMEM;
+
+ sv2 = shadow_alloc(obj + 1, id, size, gfp_flags, shadow_ctor, &var2);
+ if (!sv2)
+ return -ENOMEM;
+
+ sv3 = shadow_alloc(obj, id + 1, size, gfp_flags, shadow_ctor, &var3);
+ if (!sv3)
+ return -ENOMEM;
+
+ /*
+ * Verify we can find our new shadow variables and that they point
+ * to expected data.
+ */
+ ret = shadow_get(obj, id);
+ if (!ret)
+ return -EINVAL;
+ if (ret == sv1 && *sv1 == &var1)
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv1), ptr_id(*sv1));
+
+ ret = shadow_get(obj + 1, id);
+ if (!ret)
+ return -EINVAL;
+ if (ret == sv2 && *sv2 == &var2)
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv2), ptr_id(*sv2));
+ ret = shadow_get(obj, id + 1);
+ if (!ret)
+ return -EINVAL;
+ if (ret == sv3 && *sv3 == &var3)
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv3), ptr_id(*sv3));
+
+ /*
+ * Allocate or get a few more, this time with the same <obj>, <id>.
+ * The second invocation should return the same shadow var.
+ */
+ sv4 = shadow_get_or_alloc(obj + 2, id, size, gfp_flags, shadow_ctor, &var4);
+ if (!sv4)
+ return -ENOMEM;
+
+ ret = shadow_get_or_alloc(obj + 2, id, size, gfp_flags, shadow_ctor, &var4);
+ if (!ret)
+ return -EINVAL;
+ if (ret == sv4 && *sv4 == &var4)
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv4), ptr_id(*sv4));
+
+ /*
+ * Free the <obj=*, id> shadow variables and check that we can no
+ * longer find them.
+ */
+ shadow_free(obj, id, shadow_dtor); /* sv1 */
+ ret = shadow_get(obj, id);
+ if (!ret)
+ pr_info(" got expected NULL result\n");
+
+ shadow_free(obj + 1, id, shadow_dtor); /* sv2 */
+ ret = shadow_get(obj + 1, id);
+ if (!ret)
+ pr_info(" got expected NULL result\n");
+
+ shadow_free(obj + 2, id, shadow_dtor); /* sv4 */
+ ret = shadow_get(obj + 2, id);
+ if (!ret)
+ pr_info(" got expected NULL result\n");
+
+ /*
+ * We should still find an <id+1> variable.
+ */
+ ret = shadow_get(obj, id + 1);
+ if (!ret)
+ return -EINVAL;
+ if (ret == sv3 && *sv3 == &var3)
+ pr_info(" got expected PTR%d -> PTR%d result\n",
+ ptr_id(sv3), ptr_id(*sv3));
+
+ /*
+ * Free all the <id+1> variables, too.
+ */
+ shadow_free_all(id + 1, shadow_dtor); /* sv3 */
+ ret = shadow_get(obj, id);
+ if (!ret)
+ pr_info(" shadow_get() got expected NULL result\n");
+
+
+ free_ptr_list();
+
+ return 0;
+}
+
+static void test_klp_shadow_vars_exit(void)
+{
+}
+
+module_init(test_klp_shadow_vars_init);
+module_exit(test_klp_shadow_vars_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
+MODULE_DESCRIPTION("Livepatch test: shadow variables");
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 1e1bbf171eca..a1705545e6ac 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -1989,6 +1989,7 @@ void locking_selftest(void)
init_shared_classes();
debug_locks_silent = !debug_locks_verbose;
+ lockdep_set_selftest_task(current);
DO_TESTCASE_6R("A-A deadlock", AA);
DO_TESTCASE_6R("A-B-B-A deadlock", ABBA);
@@ -2097,5 +2098,6 @@ void locking_selftest(void)
printk("---------------------------------\n");
debug_locks = 1;
}
+ lockdep_set_selftest_task(NULL);
debug_locks_silent = 0;
}
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index 236eb21167b5..a8ede77afe0d 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -20,7 +20,8 @@
static noinline size_t
lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len,
- size_t ti, void *wrkmem)
+ size_t ti, void *wrkmem, signed char *state_offset,
+ const unsigned char bitstream_version)
{
const unsigned char *ip;
unsigned char *op;
@@ -35,27 +36,85 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
ip += ti < 4 ? 4 - ti : 0;
for (;;) {
- const unsigned char *m_pos;
+ const unsigned char *m_pos = NULL;
size_t t, m_len, m_off;
u32 dv;
+ u32 run_length = 0;
literal:
ip += 1 + ((ip - ii) >> 5);
next:
if (unlikely(ip >= ip_end))
break;
dv = get_unaligned_le32(ip);
- t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
- m_pos = in + dict[t];
- dict[t] = (lzo_dict_t) (ip - in);
- if (unlikely(dv != get_unaligned_le32(m_pos)))
- goto literal;
+
+ if (dv == 0 && bitstream_version) {
+ const unsigned char *ir = ip + 4;
+ const unsigned char *limit = ip_end
+ < (ip + MAX_ZERO_RUN_LENGTH + 1)
+ ? ip_end : ip + MAX_ZERO_RUN_LENGTH + 1;
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
+ defined(LZO_FAST_64BIT_MEMORY_ACCESS)
+ u64 dv64;
+
+ for (; (ir + 32) <= limit; ir += 32) {
+ dv64 = get_unaligned((u64 *)ir);
+ dv64 |= get_unaligned((u64 *)ir + 1);
+ dv64 |= get_unaligned((u64 *)ir + 2);
+ dv64 |= get_unaligned((u64 *)ir + 3);
+ if (dv64)
+ break;
+ }
+ for (; (ir + 8) <= limit; ir += 8) {
+ dv64 = get_unaligned((u64 *)ir);
+ if (dv64) {
+# if defined(__LITTLE_ENDIAN)
+ ir += __builtin_ctzll(dv64) >> 3;
+# elif defined(__BIG_ENDIAN)
+ ir += __builtin_clzll(dv64) >> 3;
+# else
+# error "missing endian definition"
+# endif
+ break;
+ }
+ }
+#else
+ while ((ir < (const unsigned char *)
+ ALIGN((uintptr_t)ir, 4)) &&
+ (ir < limit) && (*ir == 0))
+ ir++;
+ for (; (ir + 4) <= limit; ir += 4) {
+ dv = *((u32 *)ir);
+ if (dv) {
+# if defined(__LITTLE_ENDIAN)
+ ir += __builtin_ctz(dv) >> 3;
+# elif defined(__BIG_ENDIAN)
+ ir += __builtin_clz(dv) >> 3;
+# else
+# error "missing endian definition"
+# endif
+ break;
+ }
+ }
+#endif
+ while (likely(ir < limit) && unlikely(*ir == 0))
+ ir++;
+ run_length = ir - ip;
+ if (run_length > MAX_ZERO_RUN_LENGTH)
+ run_length = MAX_ZERO_RUN_LENGTH;
+ } else {
+ t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
+ m_pos = in + dict[t];
+ dict[t] = (lzo_dict_t) (ip - in);
+ if (unlikely(dv != get_unaligned_le32(m_pos)))
+ goto literal;
+ }
ii -= ti;
ti = 0;
t = ip - ii;
if (t != 0) {
if (t <= 3) {
- op[-2] |= t;
+ op[*state_offset] |= t;
COPY4(op, ii);
op += t;
} else if (t <= 16) {
@@ -88,6 +147,17 @@ next:
}
}
+ if (unlikely(run_length)) {
+ ip += run_length;
+ run_length -= MIN_ZERO_RUN_LENGTH;
+ put_unaligned_le32((run_length << 21) | 0xfffc18
+ | (run_length & 0x7), op);
+ op += 4;
+ run_length = 0;
+ *state_offset = -3;
+ goto finished_writing_instruction;
+ }
+
m_len = 4;
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64)
@@ -170,7 +240,6 @@ m_len_done:
m_off = ip - m_pos;
ip += m_len;
- ii = ip;
if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) {
m_off -= 1;
*op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2));
@@ -207,29 +276,48 @@ m_len_done:
*op++ = (m_off << 2);
*op++ = (m_off >> 6);
}
+ *state_offset = -2;
+finished_writing_instruction:
+ ii = ip;
goto next;
}
*out_len = op - out;
return in_end - (ii - ti);
}
-int lzo1x_1_compress(const unsigned char *in, size_t in_len,
+int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len,
- void *wrkmem)
+ void *wrkmem, const unsigned char bitstream_version)
{
const unsigned char *ip = in;
unsigned char *op = out;
+ unsigned char *data_start;
size_t l = in_len;
size_t t = 0;
+ signed char state_offset = -2;
+ unsigned int m4_max_offset;
+
+ // LZO v0 will never write 17 as first byte (except for zero-length
+ // input), so this is used to version the bitstream
+ if (bitstream_version > 0) {
+ *op++ = 17;
+ *op++ = bitstream_version;
+ m4_max_offset = M4_MAX_OFFSET_V1;
+ } else {
+ m4_max_offset = M4_MAX_OFFSET_V0;
+ }
+
+ data_start = op;
while (l > 20) {
- size_t ll = l <= (M4_MAX_OFFSET + 1) ? l : (M4_MAX_OFFSET + 1);
+ size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1);
uintptr_t ll_end = (uintptr_t) ip + ll;
if ((ll_end + ((t + ll) >> 5)) <= ll_end)
break;
BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS);
memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t));
- t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem);
+ t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem,
+ &state_offset, bitstream_version);
ip += ll;
op += *out_len;
l -= ll;
@@ -239,10 +327,10 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len,
if (t > 0) {
const unsigned char *ii = in + in_len - t;
- if (op == out && t <= 238) {
+ if (op == data_start && t <= 238) {
*op++ = (17 + t);
} else if (t <= 3) {
- op[-2] |= t;
+ op[state_offset] |= t;
} else if (t <= 18) {
*op++ = (t - 3);
} else {
@@ -273,7 +361,24 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len,
*out_len = op - out;
return LZO_E_OK;
}
+
+int lzo1x_1_compress(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len,
+ void *wrkmem)
+{
+ return lzogeneric1x_1_compress(in, in_len, out, out_len, wrkmem, 0);
+}
+
+int lzorle1x_1_compress(const unsigned char *in, size_t in_len,
+ unsigned char *out, size_t *out_len,
+ void *wrkmem)
+{
+ return lzogeneric1x_1_compress(in, in_len, out, out_len,
+ wrkmem, LZO_VERSION);
+}
+
EXPORT_SYMBOL_GPL(lzo1x_1_compress);
+EXPORT_SYMBOL_GPL(lzorle1x_1_compress);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZO1X-1 Compressor");
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index a1c387f6afba..9e07e9ef1aad 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -46,11 +46,21 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
const unsigned char * const ip_end = in + in_len;
unsigned char * const op_end = out + *out_len;
+ unsigned char bitstream_version;
+
op = out;
ip = in;
if (unlikely(in_len < 3))
goto input_overrun;
+
+ if (likely(in_len >= 5) && likely(*ip == 17)) {
+ bitstream_version = ip[1];
+ ip += 2;
+ } else {
+ bitstream_version = 0;
+ }
+
if (*ip > 17) {
t = *ip++ - 17;
if (t < 4) {
@@ -154,32 +164,49 @@ copy_literal_run:
m_pos -= next >> 2;
next &= 3;
} else {
- m_pos = op;
- m_pos -= (t & 8) << 11;
- t = (t & 7) + (3 - 1);
- if (unlikely(t == 2)) {
- size_t offset;
- const unsigned char *ip_last = ip;
+ NEED_IP(2);
+ next = get_unaligned_le16(ip);
+ if (((next & 0xfffc) == 0xfffc) &&
+ ((t & 0xf8) == 0x18) &&
+ likely(bitstream_version)) {
+ NEED_IP(3);
+ t &= 7;
+ t |= ip[2] << 3;
+ t += MIN_ZERO_RUN_LENGTH;
+ NEED_OP(t);
+ memset(op, 0, t);
+ op += t;
+ next &= 3;
+ ip += 3;
+ goto match_next;
+ } else {
+ m_pos = op;
+ m_pos -= (t & 8) << 11;
+ t = (t & 7) + (3 - 1);
+ if (unlikely(t == 2)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
- while (unlikely(*ip == 0)) {
- ip++;
- NEED_IP(1);
- }
- offset = ip - ip_last;
- if (unlikely(offset > MAX_255_COUNT))
- return LZO_E_ERROR;
+ while (unlikely(*ip == 0)) {
+ ip++;
+ NEED_IP(1);
+ }
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
- offset = (offset << 8) - offset;
- t += offset + 7 + *ip++;
- NEED_IP(2);
+ offset = (offset << 8) - offset;
+ t += offset + 7 + *ip++;
+ NEED_IP(2);
+ next = get_unaligned_le16(ip);
+ }
+ ip += 2;
+ m_pos -= next >> 2;
+ next &= 3;
+ if (m_pos == op)
+ goto eof_found;
+ m_pos -= 0x4000;
}
- next = get_unaligned_le16(ip);
- ip += 2;
- m_pos -= next >> 2;
- next &= 3;
- if (m_pos == op)
- goto eof_found;
- m_pos -= 0x4000;
}
TEST_LB(m_pos);
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
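
The decompressor above sniffs a versioned header before the usual first-byte handling: lzo-rle output begins with byte 17 followed by a version byte, which a legacy LZO1X stream never emits as its first byte except for zero-length input. A small sketch of that check follows; lzo_stream_version() and the sample buffers are illustrative, not kernel code.

/* Sketch: classify a compressed stream as legacy LZO1X (version 0) or the
 * RLE-capable versioned format, mirroring the in_len >= 5 && *ip == 17 test. */
#include <stdio.h>

static int lzo_stream_version(const unsigned char *in, size_t in_len)
{
	if (in_len >= 5 && in[0] == 17)
		return in[1];	/* e.g. 1 for the RLE-capable bitstream */
	return 0;		/* legacy LZO1X stream */
}

int main(void)
{
	const unsigned char rle_hdr[] = { 17, 1, 0x00, 0x00, 0x11 };
	const unsigned char legacy[]  = { 0x15, 0x00, 0x00, 0x00, 0x11 };

	printf("rle stream version: %d\n",
	       lzo_stream_version(rle_hdr, sizeof(rle_hdr)));
	printf("legacy stream version: %d\n",
	       lzo_stream_version(legacy, sizeof(legacy)));
	return 0;
}
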
diff --git a/lib/lzo/lzodefs.h b/lib/lzo/lzodefs.h
index 4edefd2f540c..b60851fcf6ce 100644
--- a/lib/lzo/lzodefs.h
+++ b/lib/lzo/lzodefs.h
@@ -13,9 +13,15 @@
*/
+/* Version
+ * 0: original lzo version
+ * 1: lzo with support for RLE
+ */
+#define LZO_VERSION 1
+
#define COPY4(dst, src) \
put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst))
-#if defined(__x86_64__)
+#if defined(CONFIG_X86_64) || defined(CONFIG_ARM64)
#define COPY8(dst, src) \
put_unaligned(get_unaligned((const u64 *)(src)), (u64 *)(dst))
#else
@@ -25,19 +31,21 @@
#if defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
#error "conflicting endian definitions"
-#elif defined(__x86_64__)
+#elif defined(CONFIG_X86_64) || defined(CONFIG_ARM64)
#define LZO_USE_CTZ64 1
#define LZO_USE_CTZ32 1
-#elif defined(__i386__) || defined(__powerpc__)
+#define LZO_FAST_64BIT_MEMORY_ACCESS
+#elif defined(CONFIG_X86) || defined(CONFIG_PPC)
#define LZO_USE_CTZ32 1
-#elif defined(__arm__) && (__LINUX_ARM_ARCH__ >= 5)
+#elif defined(CONFIG_ARM) && (__LINUX_ARM_ARCH__ >= 5)
#define LZO_USE_CTZ32 1
#endif
#define M1_MAX_OFFSET 0x0400
#define M2_MAX_OFFSET 0x0800
#define M3_MAX_OFFSET 0x4000
-#define M4_MAX_OFFSET 0xbfff
+#define M4_MAX_OFFSET_V0 0xbfff
+#define M4_MAX_OFFSET_V1 0xbffe
#define M1_MIN_LEN 2
#define M1_MAX_LEN 2
@@ -53,6 +61,9 @@
#define M3_MARKER 32
#define M4_MARKER 16
+#define MIN_ZERO_RUN_LENGTH 4
+#define MAX_ZERO_RUN_LENGTH (2047 + MIN_ZERO_RUN_LENGTH)
+
#define lzo_dict_t unsigned short
#define D_BITS 13
#define D_SIZE (1u << D_BITS)
diff --git a/lib/objagg.c b/lib/objagg.c
new file mode 100644
index 000000000000..576be22e86de
--- /dev/null
+++ b/lib/objagg.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rhashtable.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/sort.h>
+#include <linux/objagg.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/objagg.h>
+
+struct objagg_hints {
+ struct rhashtable node_ht;
+ struct rhashtable_params ht_params;
+ struct list_head node_list;
+ unsigned int node_count;
+ unsigned int root_count;
+ unsigned int refcount;
+ const struct objagg_ops *ops;
+};
+
+struct objagg_hints_node {
+ struct rhash_head ht_node; /* member of objagg_hints->node_ht */
+ struct list_head list; /* member of objagg_hints->node_list */
+ struct objagg_hints_node *parent;
+ unsigned int root_id;
+ struct objagg_obj_stats_info stats_info;
+ unsigned long obj[0];
+};
+
+static struct objagg_hints_node *
+objagg_hints_lookup(struct objagg_hints *objagg_hints, void *obj)
+{
+ if (!objagg_hints)
+ return NULL;
+ return rhashtable_lookup_fast(&objagg_hints->node_ht, obj,
+ objagg_hints->ht_params);
+}
+
+struct objagg {
+ const struct objagg_ops *ops;
+ void *priv;
+ struct rhashtable obj_ht;
+ struct rhashtable_params ht_params;
+ struct list_head obj_list;
+ unsigned int obj_count;
+ struct ida root_ida;
+ struct objagg_hints *hints;
+};
+
+struct objagg_obj {
+ struct rhash_head ht_node; /* member of objagg->obj_ht */
+ struct list_head list; /* member of objagg->obj_list */
+ struct objagg_obj *parent; /* if the object is nested, this
+ * holds pointer to parent, otherwise NULL
+ */
+ union {
+ void *delta_priv; /* user delta private */
+ void *root_priv; /* user root private */
+ };
+ unsigned int root_id;
+ unsigned int refcount; /* counts number of users of this object
+ * including nested objects
+ */
+ struct objagg_obj_stats stats;
+ unsigned long obj[0];
+};
+
+static unsigned int objagg_obj_ref_inc(struct objagg_obj *objagg_obj)
+{
+ return ++objagg_obj->refcount;
+}
+
+static unsigned int objagg_obj_ref_dec(struct objagg_obj *objagg_obj)
+{
+ return --objagg_obj->refcount;
+}
+
+static void objagg_obj_stats_inc(struct objagg_obj *objagg_obj)
+{
+ objagg_obj->stats.user_count++;
+ objagg_obj->stats.delta_user_count++;
+ if (objagg_obj->parent)
+ objagg_obj->parent->stats.delta_user_count++;
+}
+
+static void objagg_obj_stats_dec(struct objagg_obj *objagg_obj)
+{
+ objagg_obj->stats.user_count--;
+ objagg_obj->stats.delta_user_count--;
+ if (objagg_obj->parent)
+ objagg_obj->parent->stats.delta_user_count--;
+}
+
+static bool objagg_obj_is_root(const struct objagg_obj *objagg_obj)
+{
+ /* Nesting is not supported, so we can use ->parent
+ * to figure out if the object is root.
+ */
+ return !objagg_obj->parent;
+}
+
+/**
+ * objagg_obj_root_priv - obtains root private for an object
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Either the object is root itself when the private is returned
+ * directly, or the parent is root and its private is returned
+ * instead.
+ *
+ * Returns a user private root pointer.
+ */
+const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj)
+{
+ if (objagg_obj_is_root(objagg_obj))
+ return objagg_obj->root_priv;
+ WARN_ON(!objagg_obj_is_root(objagg_obj->parent));
+ return objagg_obj->parent->root_priv;
+}
+EXPORT_SYMBOL(objagg_obj_root_priv);
+
+/**
+ * objagg_obj_delta_priv - obtains delta private for an object
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Returns user private delta pointer or NULL in case the passed
+ * object is root.
+ */
+const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj)
+{
+ if (objagg_obj_is_root(objagg_obj))
+ return NULL;
+ return objagg_obj->delta_priv;
+}
+EXPORT_SYMBOL(objagg_obj_delta_priv);
+
+/**
+ * objagg_obj_raw - obtains object user private pointer
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Returns user private pointer as was passed to objagg_obj_get() by "obj" arg.
+ */
+const void *objagg_obj_raw(const struct objagg_obj *objagg_obj)
+{
+ return objagg_obj->obj;
+}
+EXPORT_SYMBOL(objagg_obj_raw);
+
+static struct objagg_obj *objagg_obj_lookup(struct objagg *objagg, void *obj)
+{
+ return rhashtable_lookup_fast(&objagg->obj_ht, obj, objagg->ht_params);
+}
+
+static int objagg_obj_parent_assign(struct objagg *objagg,
+ struct objagg_obj *objagg_obj,
+ struct objagg_obj *parent,
+ bool take_parent_ref)
+{
+ void *delta_priv;
+
+ delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj,
+ objagg_obj->obj);
+ if (IS_ERR(delta_priv))
+ return PTR_ERR(delta_priv);
+
+ /* User returned a delta private, that means that
+ * our object can be aggregated into the parent.
+ */
+ objagg_obj->parent = parent;
+ objagg_obj->delta_priv = delta_priv;
+ if (take_parent_ref)
+ objagg_obj_ref_inc(objagg_obj->parent);
+ trace_objagg_obj_parent_assign(objagg, objagg_obj,
+ parent,
+ parent->refcount);
+ return 0;
+}
+
+static int objagg_obj_parent_lookup_assign(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ struct objagg_obj *objagg_obj_cur;
+ int err;
+
+ list_for_each_entry(objagg_obj_cur, &objagg->obj_list, list) {
+ /* Nesting is not supported. In case the object
+ * is not root, it cannot be assigned as parent.
+ */
+ if (!objagg_obj_is_root(objagg_obj_cur))
+ continue;
+ err = objagg_obj_parent_assign(objagg, objagg_obj,
+ objagg_obj_cur, true);
+ if (!err)
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static void __objagg_obj_put(struct objagg *objagg,
+ struct objagg_obj *objagg_obj);
+
+static void objagg_obj_parent_unassign(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ trace_objagg_obj_parent_unassign(objagg, objagg_obj,
+ objagg_obj->parent,
+ objagg_obj->parent->refcount);
+ objagg->ops->delta_destroy(objagg->priv, objagg_obj->delta_priv);
+ __objagg_obj_put(objagg, objagg_obj->parent);
+}
+
+static int objagg_obj_root_id_alloc(struct objagg *objagg,
+ struct objagg_obj *objagg_obj,
+ struct objagg_hints_node *hnode)
+{
+ unsigned int min, max;
+ int root_id;
+
+ /* In case there are no hints available, the root id is invalid. */
+ if (!objagg->hints) {
+ objagg_obj->root_id = OBJAGG_OBJ_ROOT_ID_INVALID;
+ return 0;
+ }
+
+ if (hnode) {
+ min = hnode->root_id;
+ max = hnode->root_id;
+ } else {
+ /* For objects with no hint, start after the last
+ * hinted root_id.
+ */
+ min = objagg->hints->root_count;
+ max = ~0;
+ }
+
+ root_id = ida_alloc_range(&objagg->root_ida, min, max, GFP_KERNEL);
+
+ if (root_id < 0)
+ return root_id;
+ objagg_obj->root_id = root_id;
+ return 0;
+}
+
+static void objagg_obj_root_id_free(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ if (!objagg->hints)
+ return;
+ ida_free(&objagg->root_ida, objagg_obj->root_id);
+}
+
+static int objagg_obj_root_create(struct objagg *objagg,
+ struct objagg_obj *objagg_obj,
+ struct objagg_hints_node *hnode)
+{
+ int err;
+
+ err = objagg_obj_root_id_alloc(objagg, objagg_obj, hnode);
+ if (err)
+ return err;
+ objagg_obj->root_priv = objagg->ops->root_create(objagg->priv,
+ objagg_obj->obj,
+ objagg_obj->root_id);
+ if (IS_ERR(objagg_obj->root_priv)) {
+ err = PTR_ERR(objagg_obj->root_priv);
+ goto err_root_create;
+ }
+ trace_objagg_obj_root_create(objagg, objagg_obj);
+ return 0;
+
+err_root_create:
+ objagg_obj_root_id_free(objagg, objagg_obj);
+ return err;
+}
+
+static void objagg_obj_root_destroy(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ trace_objagg_obj_root_destroy(objagg, objagg_obj);
+ objagg->ops->root_destroy(objagg->priv, objagg_obj->root_priv);
+ objagg_obj_root_id_free(objagg, objagg_obj);
+}
+
+static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj);
+
+static int objagg_obj_init_with_hints(struct objagg *objagg,
+ struct objagg_obj *objagg_obj,
+ bool *hint_found)
+{
+ struct objagg_hints_node *hnode;
+ struct objagg_obj *parent;
+ int err;
+
+ hnode = objagg_hints_lookup(objagg->hints, objagg_obj->obj);
+ if (!hnode) {
+ *hint_found = false;
+ return 0;
+ }
+ *hint_found = true;
+
+ if (!hnode->parent)
+ return objagg_obj_root_create(objagg, objagg_obj, hnode);
+
+ parent = __objagg_obj_get(objagg, hnode->parent->obj);
+ if (IS_ERR(parent))
+ return PTR_ERR(parent);
+
+ err = objagg_obj_parent_assign(objagg, objagg_obj, parent, false);
+ if (err) {
+ *hint_found = false;
+ err = 0;
+ goto err_parent_assign;
+ }
+
+ return 0;
+
+err_parent_assign:
+ objagg_obj_put(objagg, parent);
+ return err;
+}
+
+static int objagg_obj_init(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ bool hint_found;
+ int err;
+
+ /* First, try to use hints if they are available and
+ * if they provide result.
+ */
+ err = objagg_obj_init_with_hints(objagg, objagg_obj, &hint_found);
+ if (err)
+ return err;
+
+ if (hint_found)
+ return 0;
+
+ /* Try to find if the object can be aggregated under an existing one. */
+ err = objagg_obj_parent_lookup_assign(objagg, objagg_obj);
+ if (!err)
+ return 0;
+ /* If aggregation is not possible, make the object a root. */
+ return objagg_obj_root_create(objagg, objagg_obj, NULL);
+}
+
+static void objagg_obj_fini(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ if (!objagg_obj_is_root(objagg_obj))
+ objagg_obj_parent_unassign(objagg, objagg_obj);
+ else
+ objagg_obj_root_destroy(objagg, objagg_obj);
+}
+
+static struct objagg_obj *objagg_obj_create(struct objagg *objagg, void *obj)
+{
+ struct objagg_obj *objagg_obj;
+ int err;
+
+ objagg_obj = kzalloc(sizeof(*objagg_obj) + objagg->ops->obj_size,
+ GFP_KERNEL);
+ if (!objagg_obj)
+ return ERR_PTR(-ENOMEM);
+ objagg_obj_ref_inc(objagg_obj);
+ memcpy(objagg_obj->obj, obj, objagg->ops->obj_size);
+
+ err = objagg_obj_init(objagg, objagg_obj);
+ if (err)
+ goto err_obj_init;
+
+ err = rhashtable_insert_fast(&objagg->obj_ht, &objagg_obj->ht_node,
+ objagg->ht_params);
+ if (err)
+ goto err_ht_insert;
+ list_add(&objagg_obj->list, &objagg->obj_list);
+ objagg->obj_count++;
+ trace_objagg_obj_create(objagg, objagg_obj);
+
+ return objagg_obj;
+
+err_ht_insert:
+ objagg_obj_fini(objagg, objagg_obj);
+err_obj_init:
+ kfree(objagg_obj);
+ return ERR_PTR(err);
+}
+
+static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj)
+{
+ struct objagg_obj *objagg_obj;
+
+ /* First, try to find the object exactly as user passed it,
+ * perhaps it is already in use.
+ */
+ objagg_obj = objagg_obj_lookup(objagg, obj);
+ if (objagg_obj) {
+ objagg_obj_ref_inc(objagg_obj);
+ return objagg_obj;
+ }
+
+ return objagg_obj_create(objagg, obj);
+}
+
+/**
+ * objagg_obj_get - gets an object within objagg instance
+ * @objagg: objagg instance
+ * @obj: user-specific private object pointer
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Size of the "obj" memory is specified in "objagg->ops".
+ *
+ * There are 3 main options this function wraps:
+ * 1) The object according to "obj" already exists. In that case
+ * the reference counter is incremented and the object is returned.
+ * 2) The object does not exist, but it can be aggregated within
+ * another object. In that case, user ops->delta_create() is called
+ * to obtain delta data and a new object is created with returned
+ * user-delta private pointer.
+ * 3) The object does not exist and cannot be aggregated into
+ * any of the existing objects. In that case, user ops->root_create()
+ * is called to create the root and a new object is created with
+ * returned user-root private pointer.
+ *
+ * Returns a pointer to objagg object instance in case of success,
+ * otherwise it returns pointer error using ERR_PTR macro.
+ */
+struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj)
+{
+ struct objagg_obj *objagg_obj;
+
+ objagg_obj = __objagg_obj_get(objagg, obj);
+ if (IS_ERR(objagg_obj))
+ return objagg_obj;
+ objagg_obj_stats_inc(objagg_obj);
+ trace_objagg_obj_get(objagg, objagg_obj, objagg_obj->refcount);
+ return objagg_obj;
+}
+EXPORT_SYMBOL(objagg_obj_get);
+
+static void objagg_obj_destroy(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ trace_objagg_obj_destroy(objagg, objagg_obj);
+ --objagg->obj_count;
+ list_del(&objagg_obj->list);
+ rhashtable_remove_fast(&objagg->obj_ht, &objagg_obj->ht_node,
+ objagg->ht_params);
+ objagg_obj_fini(objagg, objagg_obj);
+ kfree(objagg_obj);
+}
+
+static void __objagg_obj_put(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ if (!objagg_obj_ref_dec(objagg_obj))
+ objagg_obj_destroy(objagg, objagg_obj);
+}
+
+/**
+ * objagg_obj_put - puts an object within objagg instance
+ * @objagg: objagg instance
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Symmetric to objagg_obj_get().
+ */
+void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj)
+{
+ trace_objagg_obj_put(objagg, objagg_obj, objagg_obj->refcount);
+ objagg_obj_stats_dec(objagg_obj);
+ __objagg_obj_put(objagg, objagg_obj);
+}
+EXPORT_SYMBOL(objagg_obj_put);
+
+/**
+ * objagg_create - creates a new objagg instance
+ * @ops: user-specific callbacks
+ * @objagg_hints: hints, can be NULL
+ * @priv: pointer to a private data passed to the ops
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The purpose of the library is to provide an infrastructure to
+ * aggregate user-specified objects. The library does not care about the type
+ * of the object. The user fills up ops which take care of the specific
+ * user object manipulation.
+ *
+ * As a very stupid example, consider integer numbers. For example
+ * number 8 as a root object. That can aggregate number 9 with delta 1,
+ * number 10 with delta 2, etc. This example is implemented as
+ * a part of a testing module in test_objagg.c file.
+ *
+ * Each objagg instance contains multiple trees. Each tree node is
+ * represented by "an object". In the current implementation there can be
+ * only root and leaf nodes. Leaf nodes are called deltas.
+ * But in general, this can be easily extended for intermediate nodes.
+ * In that extension, a delta would be associated with all non-root
+ * nodes.
+ *
+ * Returns a pointer to newly created objagg instance in case of success,
+ * otherwise it returns pointer error using ERR_PTR macro.
+ */
+struct objagg *objagg_create(const struct objagg_ops *ops,
+ struct objagg_hints *objagg_hints, void *priv)
+{
+ struct objagg *objagg;
+ int err;
+
+ if (WARN_ON(!ops || !ops->root_create || !ops->root_destroy ||
+ !ops->delta_check || !ops->delta_create ||
+ !ops->delta_destroy))
+ return ERR_PTR(-EINVAL);
+
+ objagg = kzalloc(sizeof(*objagg), GFP_KERNEL);
+ if (!objagg)
+ return ERR_PTR(-ENOMEM);
+ objagg->ops = ops;
+ if (objagg_hints) {
+ objagg->hints = objagg_hints;
+ objagg_hints->refcount++;
+ }
+ objagg->priv = priv;
+ INIT_LIST_HEAD(&objagg->obj_list);
+
+ objagg->ht_params.key_len = ops->obj_size;
+ objagg->ht_params.key_offset = offsetof(struct objagg_obj, obj);
+ objagg->ht_params.head_offset = offsetof(struct objagg_obj, ht_node);
+
+ err = rhashtable_init(&objagg->obj_ht, &objagg->ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ ida_init(&objagg->root_ida);
+
+ trace_objagg_create(objagg);
+ return objagg;
+
+err_rhashtable_init:
+ kfree(objagg);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(objagg_create);
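
The objagg_create() comment above points at the integer example from test_objagg.c. The sketch below is a pure userspace illustration of that root/delta idea only; it does not use the objagg API and does not mirror test_objagg.c, and struct toy_agg and toy_aggregate() are made up for this example.

/* Sketch: represent 9 and 10 as deltas relative to the shared root 8,
 * which is the kind of aggregation the library lets user callbacks express. */
#include <stdio.h>

struct toy_agg {
	int root;	/* the shared root object */
	int delta;	/* 0 means this entry is the root itself */
};

static struct toy_agg toy_aggregate(int obj, int root)
{
	struct toy_agg a = { .root = root, .delta = obj - root };

	return a;
}

int main(void)
{
	int objs[] = { 8, 9, 10 };
	size_t i;

	for (i = 0; i < sizeof(objs) / sizeof(objs[0]); i++) {
		struct toy_agg a = toy_aggregate(objs[i], 8);

		printf("obj %d -> root %d, delta %d\n",
		       objs[i], a.root, a.delta);
	}
	return 0;
}

In the real library it is the user's delta_create() and root_create() callbacks that decide whether such an aggregation is acceptable and what the delta private data contains.
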
+
+/**
+ * objagg_destroy - destroys an objagg instance
+ * @objagg: objagg instance
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void objagg_destroy(struct objagg *objagg)
+{
+ trace_objagg_destroy(objagg);
+ ida_destroy(&objagg->root_ida);
+ WARN_ON(!list_empty(&objagg->obj_list));
+ rhashtable_destroy(&objagg->obj_ht);
+ if (objagg->hints)
+ objagg_hints_put(objagg->hints);
+ kfree(objagg);
+}
+EXPORT_SYMBOL(objagg_destroy);
+
+static int objagg_stats_info_sort_cmp_func(const void *a, const void *b)
+{
+ const struct objagg_obj_stats_info *stats_info1 = a;
+ const struct objagg_obj_stats_info *stats_info2 = b;
+
+ if (stats_info1->is_root != stats_info2->is_root)
+ return stats_info2->is_root - stats_info1->is_root;
+ if (stats_info1->stats.delta_user_count !=
+ stats_info2->stats.delta_user_count)
+ return stats_info2->stats.delta_user_count -
+ stats_info1->stats.delta_user_count;
+ return stats_info2->stats.user_count - stats_info1->stats.user_count;
+}
+
+/**
+ * objagg_stats_get - obtains stats of the objagg instance
+ * @objagg: objagg instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The returned structure contains statistics of all objects
+ * currently in use, ordered by the following rules:
+ * 1) Root objects are always on lower indexes than the rest.
+ * 2) Objects with higher delta user count are always on lower
+ * indexes.
+ * 3) In case more objects have the same delta user count,
+ * the objects are ordered by user count.
+ *
+ * Returns a pointer to stats instance in case of success,
+ * otherwise it returns pointer error using ERR_PTR macro.
+ */
+const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
+{
+ struct objagg_stats *objagg_stats;
+ struct objagg_obj *objagg_obj;
+ size_t alloc_size;
+ int i;
+
+ alloc_size = sizeof(*objagg_stats) +
+ sizeof(objagg_stats->stats_info[0]) * objagg->obj_count;
+ objagg_stats = kzalloc(alloc_size, GFP_KERNEL);
+ if (!objagg_stats)
+ return ERR_PTR(-ENOMEM);
+
+ i = 0;
+ list_for_each_entry(objagg_obj, &objagg->obj_list, list) {
+ memcpy(&objagg_stats->stats_info[i].stats, &objagg_obj->stats,
+ sizeof(objagg_stats->stats_info[0].stats));
+ objagg_stats->stats_info[i].objagg_obj = objagg_obj;
+ objagg_stats->stats_info[i].is_root =
+ objagg_obj_is_root(objagg_obj);
+ if (objagg_stats->stats_info[i].is_root)
+ objagg_stats->root_count++;
+ i++;
+ }
+ objagg_stats->stats_info_count = i;
+
+ sort(objagg_stats->stats_info, objagg_stats->stats_info_count,
+ sizeof(struct objagg_obj_stats_info),
+ objagg_stats_info_sort_cmp_func, NULL);
+
+ return objagg_stats;
+}
+EXPORT_SYMBOL(objagg_stats_get);
+
+/**
+ * objagg_stats_put - puts stats of the objagg instance
+ * @objagg_stats: objagg instance stats
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void objagg_stats_put(const struct objagg_stats *objagg_stats)
+{
+ kfree(objagg_stats);
+}
+EXPORT_SYMBOL(objagg_stats_put);
+
+static struct objagg_hints_node *
+objagg_hints_node_create(struct objagg_hints *objagg_hints,
+ struct objagg_obj *objagg_obj, size_t obj_size,
+ struct objagg_hints_node *parent_hnode)
+{
+ unsigned int user_count = objagg_obj->stats.user_count;
+ struct objagg_hints_node *hnode;
+ int err;
+
+ hnode = kzalloc(sizeof(*hnode) + obj_size, GFP_KERNEL);
+ if (!hnode)
+ return ERR_PTR(-ENOMEM);
+ memcpy(hnode->obj, &objagg_obj->obj, obj_size);
+ hnode->stats_info.stats.user_count = user_count;
+ hnode->stats_info.stats.delta_user_count = user_count;
+ if (parent_hnode) {
+ parent_hnode->stats_info.stats.delta_user_count += user_count;
+ } else {
+ hnode->root_id = objagg_hints->root_count++;
+ hnode->stats_info.is_root = true;
+ }
+ hnode->stats_info.objagg_obj = objagg_obj;
+
+ err = rhashtable_insert_fast(&objagg_hints->node_ht, &hnode->ht_node,
+ objagg_hints->ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ list_add(&hnode->list, &objagg_hints->node_list);
+ hnode->parent = parent_hnode;
+ objagg_hints->node_count++;
+
+ return hnode;
+
+err_ht_insert:
+ kfree(hnode);
+ return ERR_PTR(err);
+}
+
+static void objagg_hints_flush(struct objagg_hints *objagg_hints)
+{
+ struct objagg_hints_node *hnode, *tmp;
+
+ list_for_each_entry_safe(hnode, tmp, &objagg_hints->node_list, list) {
+ list_del(&hnode->list);
+ rhashtable_remove_fast(&objagg_hints->node_ht, &hnode->ht_node,
+ objagg_hints->ht_params);
+ kfree(hnode);
+ }
+}
+
+struct objagg_tmp_node {
+ struct objagg_obj *objagg_obj;
+ bool crossed_out;
+};
+
+struct objagg_tmp_graph {
+ struct objagg_tmp_node *nodes;
+ unsigned long nodes_count;
+ unsigned long *edges;
+};
+
+static int objagg_tmp_graph_edge_index(struct objagg_tmp_graph *graph,
+ int parent_index, int index)
+{
+ return index * graph->nodes_count + parent_index;
+}
+
+static void objagg_tmp_graph_edge_set(struct objagg_tmp_graph *graph,
+ int parent_index, int index)
+{
+ int edge_index = objagg_tmp_graph_edge_index(graph, index,
+ parent_index);
+
+ __set_bit(edge_index, graph->edges);
+}
+
+static bool objagg_tmp_graph_is_edge(struct objagg_tmp_graph *graph,
+ int parent_index, int index)
+{
+ int edge_index = objagg_tmp_graph_edge_index(graph, index,
+ parent_index);
+
+ return test_bit(edge_index, graph->edges);
+}
+
+static unsigned int objagg_tmp_graph_node_weight(struct objagg_tmp_graph *graph,
+ unsigned int index)
+{
+ struct objagg_tmp_node *node = &graph->nodes[index];
+ unsigned int weight = node->objagg_obj->stats.user_count;
+ int j;
+
+	/* Node weight is the sum of this node's users and the users of all
+	 * other nodes that this node can represent with a delta.
+	 */
+
+ for (j = 0; j < graph->nodes_count; j++) {
+ if (!objagg_tmp_graph_is_edge(graph, index, j))
+ continue;
+ node = &graph->nodes[j];
+ if (node->crossed_out)
+ continue;
+ weight += node->objagg_obj->stats.user_count;
+ }
+ return weight;
+}
+
+static int objagg_tmp_graph_node_max_weight(struct objagg_tmp_graph *graph)
+{
+ struct objagg_tmp_node *node;
+ unsigned int max_weight = 0;
+ unsigned int weight;
+ int max_index = -1;
+ int i;
+
+ for (i = 0; i < graph->nodes_count; i++) {
+ node = &graph->nodes[i];
+ if (node->crossed_out)
+ continue;
+ weight = objagg_tmp_graph_node_weight(graph, i);
+ if (weight >= max_weight) {
+ max_weight = weight;
+ max_index = i;
+ }
+ }
+ return max_index;
+}
+
+static struct objagg_tmp_graph *objagg_tmp_graph_create(struct objagg *objagg)
+{
+ unsigned int nodes_count = objagg->obj_count;
+ struct objagg_tmp_graph *graph;
+ struct objagg_tmp_node *node;
+ struct objagg_tmp_node *pnode;
+ struct objagg_obj *objagg_obj;
+ size_t alloc_size;
+ int i, j;
+
+ graph = kzalloc(sizeof(*graph), GFP_KERNEL);
+ if (!graph)
+ return NULL;
+
+ graph->nodes = kcalloc(nodes_count, sizeof(*graph->nodes), GFP_KERNEL);
+ if (!graph->nodes)
+ goto err_nodes_alloc;
+ graph->nodes_count = nodes_count;
+
+ alloc_size = BITS_TO_LONGS(nodes_count * nodes_count) *
+ sizeof(unsigned long);
+ graph->edges = kzalloc(alloc_size, GFP_KERNEL);
+ if (!graph->edges)
+ goto err_edges_alloc;
+
+ i = 0;
+ list_for_each_entry(objagg_obj, &objagg->obj_list, list) {
+ node = &graph->nodes[i++];
+ node->objagg_obj = objagg_obj;
+ }
+
+	/* Assemble a temporary graph. Insert edge X->Y in case Y can be
+	 * expressed as a delta of X.
+	 */
+ for (i = 0; i < nodes_count; i++) {
+ for (j = 0; j < nodes_count; j++) {
+ if (i == j)
+ continue;
+ pnode = &graph->nodes[i];
+ node = &graph->nodes[j];
+ if (objagg->ops->delta_check(objagg->priv,
+ pnode->objagg_obj->obj,
+ node->objagg_obj->obj)) {
+ objagg_tmp_graph_edge_set(graph, i, j);
+ }
+ }
+ }
+ return graph;
+
+err_edges_alloc:
+ kfree(graph->nodes);
+err_nodes_alloc:
+ kfree(graph);
+ return NULL;
+}
+
+static void objagg_tmp_graph_destroy(struct objagg_tmp_graph *graph)
+{
+ kfree(graph->edges);
+ kfree(graph->nodes);
+ kfree(graph);
+}
+
+static int
+objagg_opt_simple_greedy_fillup_hints(struct objagg_hints *objagg_hints,
+ struct objagg *objagg)
+{
+ struct objagg_hints_node *hnode, *parent_hnode;
+ struct objagg_tmp_graph *graph;
+ struct objagg_tmp_node *node;
+ int index;
+ int j;
+ int err;
+
+ graph = objagg_tmp_graph_create(objagg);
+ if (!graph)
+ return -ENOMEM;
+
+	/* Repeatedly pick the node that can accommodate the most users,
+	 * cross it out of the graph together with the nodes it covers as
+	 * deltas, and save them all to the hint list.
+	 */
+ while ((index = objagg_tmp_graph_node_max_weight(graph)) != -1) {
+ node = &graph->nodes[index];
+ node->crossed_out = true;
+ hnode = objagg_hints_node_create(objagg_hints,
+ node->objagg_obj,
+ objagg->ops->obj_size,
+ NULL);
+ if (IS_ERR(hnode)) {
+ err = PTR_ERR(hnode);
+ goto out;
+ }
+ parent_hnode = hnode;
+ for (j = 0; j < graph->nodes_count; j++) {
+ if (!objagg_tmp_graph_is_edge(graph, index, j))
+ continue;
+ node = &graph->nodes[j];
+ if (node->crossed_out)
+ continue;
+ node->crossed_out = true;
+ hnode = objagg_hints_node_create(objagg_hints,
+ node->objagg_obj,
+ objagg->ops->obj_size,
+ parent_hnode);
+ if (IS_ERR(hnode)) {
+ err = PTR_ERR(hnode);
+ goto out;
+ }
+ }
+ }
+
+ err = 0;
+out:
+ objagg_tmp_graph_destroy(graph);
+ return err;
+}
+
+struct objagg_opt_algo {
+ int (*fillup_hints)(struct objagg_hints *objagg_hints,
+ struct objagg *objagg);
+};
+
+static const struct objagg_opt_algo objagg_opt_simple_greedy = {
+ .fillup_hints = objagg_opt_simple_greedy_fillup_hints,
+};
+
+static const struct objagg_opt_algo *objagg_opt_algos[] = {
+ [OBJAGG_OPT_ALGO_SIMPLE_GREEDY] = &objagg_opt_simple_greedy,
+};
+
+static int objagg_hints_obj_cmp(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ struct rhashtable *ht = arg->ht;
+ struct objagg_hints *objagg_hints =
+ container_of(ht, struct objagg_hints, node_ht);
+ const struct objagg_ops *ops = objagg_hints->ops;
+ const char *ptr = obj;
+
+ ptr += ht->p.key_offset;
+ return ops->hints_obj_cmp ? ops->hints_obj_cmp(ptr, arg->key) :
+ memcmp(ptr, arg->key, ht->p.key_len);
+}
+
+/**
+ * objagg_hints_get - obtains hints instance
+ * @objagg: objagg instance
+ * @opt_algo_type: type of hints finding algorithm
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * According to the algo type, the existing objects of the objagg instance
+ * are walked through to assemble an optimal tree. We call this
+ * tree hints. These hints can later be used for the creation of
+ * a new objagg instance. There, each future object creation consults
+ * the hints in order to find out where exactly the new object
+ * should be put, as a root or as a delta.
+ *
+ * Returns a pointer to the hints instance on success,
+ * otherwise it returns an error pointer using the ERR_PTR macro.
+ */
+struct objagg_hints *objagg_hints_get(struct objagg *objagg,
+ enum objagg_opt_algo_type opt_algo_type)
+{
+ const struct objagg_opt_algo *algo = objagg_opt_algos[opt_algo_type];
+ struct objagg_hints *objagg_hints;
+ int err;
+
+ objagg_hints = kzalloc(sizeof(*objagg_hints), GFP_KERNEL);
+ if (!objagg_hints)
+ return ERR_PTR(-ENOMEM);
+
+ objagg_hints->ops = objagg->ops;
+ objagg_hints->refcount = 1;
+
+ INIT_LIST_HEAD(&objagg_hints->node_list);
+
+ objagg_hints->ht_params.key_len = objagg->ops->obj_size;
+ objagg_hints->ht_params.key_offset =
+ offsetof(struct objagg_hints_node, obj);
+ objagg_hints->ht_params.head_offset =
+ offsetof(struct objagg_hints_node, ht_node);
+ objagg_hints->ht_params.obj_cmpfn = objagg_hints_obj_cmp;
+
+ err = rhashtable_init(&objagg_hints->node_ht, &objagg_hints->ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ err = algo->fillup_hints(objagg_hints, objagg);
+ if (err)
+ goto err_fillup_hints;
+
+ if (WARN_ON(objagg_hints->node_count != objagg->obj_count)) {
+ err = -EINVAL;
+ goto err_node_count_check;
+ }
+
+ return objagg_hints;
+
+err_node_count_check:
+err_fillup_hints:
+ objagg_hints_flush(objagg_hints);
+ rhashtable_destroy(&objagg_hints->node_ht);
+err_rhashtable_init:
+ kfree(objagg_hints);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(objagg_hints_get);
+
+/**
+ * objagg_hints_put - puts hints instance
+ * @objagg_hints: objagg hints instance
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void objagg_hints_put(struct objagg_hints *objagg_hints)
+{
+ if (--objagg_hints->refcount)
+ return;
+ objagg_hints_flush(objagg_hints);
+ rhashtable_destroy(&objagg_hints->node_ht);
+ kfree(objagg_hints);
+}
+EXPORT_SYMBOL(objagg_hints_put);
+
+/**
+ * objagg_hints_stats_get - obtains stats of the hints instance
+ * @objagg_hints: hints instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The returned structure contains statistics of all objects
+ * currently in use, ordered by the following rules:
+ * 1) Root objects are always on lower indexes than the rest.
+ * 2) Objects with higher delta user count are always on lower
+ * indexes.
+ * 3) In case multiple objects have the same delta user count,
+ * the objects are ordered by user count.
+ *
+ * Returns a pointer to the stats instance on success,
+ * otherwise it returns an error pointer using the ERR_PTR macro.
+ */
+const struct objagg_stats *
+objagg_hints_stats_get(struct objagg_hints *objagg_hints)
+{
+ struct objagg_stats *objagg_stats;
+ struct objagg_hints_node *hnode;
+ int i;
+
+ objagg_stats = kzalloc(struct_size(objagg_stats, stats_info,
+ objagg_hints->node_count),
+ GFP_KERNEL);
+ if (!objagg_stats)
+ return ERR_PTR(-ENOMEM);
+
+ i = 0;
+ list_for_each_entry(hnode, &objagg_hints->node_list, list) {
+ memcpy(&objagg_stats->stats_info[i], &hnode->stats_info,
+ sizeof(objagg_stats->stats_info[0]));
+ if (objagg_stats->stats_info[i].is_root)
+ objagg_stats->root_count++;
+ i++;
+ }
+ objagg_stats->stats_info_count = i;
+
+ sort(objagg_stats->stats_info, objagg_stats->stats_info_count,
+ sizeof(struct objagg_obj_stats_info),
+ objagg_stats_info_sort_cmp_func, NULL);
+
+ return objagg_stats;
+}
+EXPORT_SYMBOL(objagg_hints_stats_get);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Object aggregation manager");
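For orientation, here is a minimal usage sketch of the API added above (not part of the patch): create an instance, get/put objects, derive hints with the simple-greedy algorithm and hand them to a second instance, as the objagg_hints_get() kernel-doc describes. The my_build_with_hints() name and the my_ops/priv/first_key/second_key parameters are placeholders; my_ops stands for a filled-in struct objagg_ops like the ones in lib/test_objagg.c further down.

#include <linux/err.h>
#include <linux/objagg.h>

static int my_build_with_hints(const struct objagg_ops *my_ops, void *priv,
			       void *first_key, void *second_key)
{
	struct objagg *objagg, *objagg2;
	struct objagg_hints *hints;
	struct objagg_obj *objagg_obj;
	int err = 0;

	/* The first instance starts without hints. */
	objagg = objagg_create(my_ops, NULL, priv);
	if (IS_ERR(objagg))
		return PTR_ERR(objagg);

	/* Each get creates a root or attaches a delta to an existing root. */
	objagg_obj = objagg_obj_get(objagg, first_key);
	if (IS_ERR(objagg_obj)) {
		err = PTR_ERR(objagg_obj);
		goto out_destroy;
	}

	/* Compute an optimal root/delta layout from the current objects... */
	hints = objagg_hints_get(objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
	if (IS_ERR(hints)) {
		err = PTR_ERR(hints);
		goto out_put;
	}

	/* ...and let a second instance consult it for its future gets. */
	objagg2 = objagg_create(my_ops, hints, priv);
	if (!IS_ERR(objagg2)) {
		struct objagg_obj *objagg_obj2;

		objagg_obj2 = objagg_obj_get(objagg2, second_key);
		if (!IS_ERR(objagg_obj2))
			objagg_obj_put(objagg2, objagg_obj2);
		objagg_destroy(objagg2);
	}

	objagg_hints_put(hints);
out_put:
	objagg_obj_put(objagg, objagg_obj);
out_destroy:
	objagg_destroy(objagg);
	return err;
}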
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index de10b8c0bff6..9877682e49c7 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -181,7 +181,7 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
percpu_ref_get(ref); /* put after confirmation */
- call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
+ call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
}
static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 2f8b61dfd9b0..e723eacf7868 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -13,11 +13,25 @@ raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
hostprogs-y += mktables
quiet_cmd_unroll = UNROLL $@
- cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \
- < $< > $@ || ( rm -f $@ && exit 1 )
+ cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) < $< > $@
ifeq ($(CONFIG_ALTIVEC),y)
altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
+
+ifdef CONFIG_CC_IS_CLANG
+# clang ppc port does not yet support -maltivec when -msoft-float is
+# enabled. A future release of clang will resolve this
+# https://bugs.llvm.org/show_bug.cgi?id=31177
+CFLAGS_REMOVE_altivec1.o += -msoft-float
+CFLAGS_REMOVE_altivec2.o += -msoft-float
+CFLAGS_REMOVE_altivec4.o += -msoft-float
+CFLAGS_REMOVE_altivec8.o += -msoft-float
+CFLAGS_REMOVE_vpermxor1.o += -msoft-float
+CFLAGS_REMOVE_vpermxor2.o += -msoft-float
+CFLAGS_REMOVE_vpermxor4.o += -msoft-float
+CFLAGS_REMOVE_vpermxor8.o += -msoft-float
+endif
endif
# The GCC option -ffreestanding is required in order to compile code containing
@@ -25,7 +39,7 @@ endif
ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
NEON_FLAGS := -ffreestanding
ifeq ($(ARCH),arm)
-NEON_FLAGS += -mfloat-abi=softfp -mfpu=neon
+NEON_FLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=neon
endif
CFLAGS_recov_neon_inner.o += $(NEON_FLAGS)
ifeq ($(ARCH),arm64)
@@ -145,7 +159,7 @@ $(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
quiet_cmd_mktable = TABLE $@
- cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 )
+ cmd_mktable = $(obj)/mktables > $@
targets += tables.c
$(obj)/tables.c: $(obj)/mktables FORCE
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 5065b1e7e327..7e4f7a8ffa8e 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -34,64 +34,64 @@ struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);
const struct raid6_calls * const raid6_algos[] = {
-#if defined(__ia64__)
- &raid6_intx16,
- &raid6_intx32,
-#endif
#if defined(__i386__) && !defined(__arch_um__)
- &raid6_mmxx1,
- &raid6_mmxx2,
- &raid6_sse1x1,
- &raid6_sse1x2,
- &raid6_sse2x1,
- &raid6_sse2x2,
-#ifdef CONFIG_AS_AVX2
- &raid6_avx2x1,
- &raid6_avx2x2,
-#endif
#ifdef CONFIG_AS_AVX512
- &raid6_avx512x1,
&raid6_avx512x2,
+ &raid6_avx512x1,
#endif
-#endif
-#if defined(__x86_64__) && !defined(__arch_um__)
- &raid6_sse2x1,
- &raid6_sse2x2,
- &raid6_sse2x4,
#ifdef CONFIG_AS_AVX2
- &raid6_avx2x1,
&raid6_avx2x2,
- &raid6_avx2x4,
+ &raid6_avx2x1,
+#endif
+ &raid6_sse2x2,
+ &raid6_sse2x1,
+ &raid6_sse1x2,
+ &raid6_sse1x1,
+ &raid6_mmxx2,
+ &raid6_mmxx1,
#endif
+#if defined(__x86_64__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
- &raid6_avx512x1,
- &raid6_avx512x2,
&raid6_avx512x4,
+ &raid6_avx512x2,
+ &raid6_avx512x1,
#endif
+#ifdef CONFIG_AS_AVX2
+ &raid6_avx2x4,
+ &raid6_avx2x2,
+ &raid6_avx2x1,
+#endif
+ &raid6_sse2x4,
+ &raid6_sse2x2,
+ &raid6_sse2x1,
#endif
#ifdef CONFIG_ALTIVEC
- &raid6_altivec1,
- &raid6_altivec2,
- &raid6_altivec4,
- &raid6_altivec8,
- &raid6_vpermxor1,
- &raid6_vpermxor2,
- &raid6_vpermxor4,
&raid6_vpermxor8,
+ &raid6_vpermxor4,
+ &raid6_vpermxor2,
+ &raid6_vpermxor1,
+ &raid6_altivec8,
+ &raid6_altivec4,
+ &raid6_altivec2,
+ &raid6_altivec1,
#endif
#if defined(CONFIG_S390)
&raid6_s390vx8,
#endif
- &raid6_intx1,
- &raid6_intx2,
- &raid6_intx4,
- &raid6_intx8,
#ifdef CONFIG_KERNEL_MODE_NEON
- &raid6_neonx1,
- &raid6_neonx2,
- &raid6_neonx4,
&raid6_neonx8,
+ &raid6_neonx4,
+ &raid6_neonx2,
+ &raid6_neonx1,
#endif
+#if defined(__ia64__)
+ &raid6_intx32,
+ &raid6_intx16,
+#endif
+ &raid6_intx8,
+ &raid6_intx4,
+ &raid6_intx2,
+ &raid6_intx1,
NULL
};
@@ -163,6 +163,11 @@ static inline const struct raid6_calls *raid6_choose_gen(
if ((*algo)->valid && !(*algo)->valid())
continue;
+ if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
+ best = *algo;
+ break;
+ }
+
perf = 0;
preempt_disable();
diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc
index d5242f544551..b7c68030da4f 100644
--- a/lib/raid6/neon.uc
+++ b/lib/raid6/neon.uc
@@ -28,7 +28,6 @@
typedef uint8x16_t unative_t;
-#define NBYTES(x) ((unative_t){x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x})
#define NSIZE sizeof(unative_t)
/*
@@ -61,7 +60,7 @@ void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
int d, z, z0;
register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
- const unative_t x1d = NBYTES(0x1d);
+ const unative_t x1d = vdupq_n_u8(0x1d);
z0 = disks - 3; /* Highest data disk */
p = dptr[z0+1]; /* XOR parity */
@@ -92,7 +91,7 @@ void raid6_neon$#_xor_syndrome_real(int disks, int start, int stop,
int d, z, z0;
register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
- const unative_t x1d = NBYTES(0x1d);
+ const unative_t x1d = vdupq_n_u8(0x1d);
z0 = stop; /* P/Q right side optimization */
p = dptr[disks-2]; /* XOR parity */
diff --git a/lib/raid6/recov_neon_inner.c b/lib/raid6/recov_neon_inner.c
index 8cd20c9f834a..f13c07f82297 100644
--- a/lib/raid6/recov_neon_inner.c
+++ b/lib/raid6/recov_neon_inner.c
@@ -10,11 +10,6 @@
#include <arm_neon.h>
-static const uint8x16_t x0f = {
- 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
- 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
-};
-
#ifdef CONFIG_ARM
/*
* AArch32 does not provide this intrinsic natively because it does not
@@ -41,6 +36,7 @@ void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
uint8x16_t pm1 = vld1q_u8(pbmul + 16);
uint8x16_t qm0 = vld1q_u8(qmul);
uint8x16_t qm1 = vld1q_u8(qmul + 16);
+ uint8x16_t x0f = vdupq_n_u8(0x0f);
/*
* while ( bytes-- ) {
@@ -60,14 +56,14 @@ void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp,
px = veorq_u8(vld1q_u8(p), vld1q_u8(dp));
vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));
- vy = (uint8x16_t)vshrq_n_s16((int16x8_t)vx, 4);
+ vy = vshrq_n_u8(vx, 4);
vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
- vy = vqtbl1q_u8(qm1, vandq_u8(vy, x0f));
+ vy = vqtbl1q_u8(qm1, vy);
qx = veorq_u8(vx, vy);
- vy = (uint8x16_t)vshrq_n_s16((int16x8_t)px, 4);
+ vy = vshrq_n_u8(px, 4);
vx = vqtbl1q_u8(pm0, vandq_u8(px, x0f));
- vy = vqtbl1q_u8(pm1, vandq_u8(vy, x0f));
+ vy = vqtbl1q_u8(pm1, vy);
vx = veorq_u8(vx, vy);
db = veorq_u8(vx, qx);
@@ -87,6 +83,7 @@ void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
{
uint8x16_t qm0 = vld1q_u8(qmul);
uint8x16_t qm1 = vld1q_u8(qmul + 16);
+ uint8x16_t x0f = vdupq_n_u8(0x0f);
/*
* while (bytes--) {
@@ -100,9 +97,9 @@ void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq,
vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq));
- vy = (uint8x16_t)vshrq_n_s16((int16x8_t)vx, 4);
+ vy = vshrq_n_u8(vx, 4);
vx = vqtbl1q_u8(qm0, vandq_u8(vx, x0f));
- vy = vqtbl1q_u8(qm1, vandq_u8(vy, x0f));
+ vy = vqtbl1q_u8(qm1, vy);
vx = veorq_u8(vx, vy);
vy = veorq_u8(vx, vld1q_u8(p));
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 79777645cac9..3ab8720aa2f8 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -34,6 +34,9 @@ endif
ifeq ($(IS_X86),yes)
OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
+ CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" | \
+ gcc -c -x assembler - >&/dev/null && \
+ rm ./-.o && echo -DCONFIG_AS_SSSE3=1)
CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \
gcc -c -x assembler - >&/dev/null && \
rm ./-.o && echo -DCONFIG_AS_AVX2=1)
diff --git a/lib/refcount.c b/lib/refcount.c
index ebcf8cd49e05..6e904af0fb3e 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -33,6 +33,9 @@
* Note that the allocator is responsible for ordering things between free()
* and alloc().
*
+ * The decrements dec_and_test() and sub_and_test() also provide acquire
+ * ordering on success.
+ *
*/
#include <linux/mutex.h>
@@ -164,8 +167,8 @@ EXPORT_SYMBOL(refcount_inc_checked);
* at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
*
* Use of this function is not recommended for the normal reference counting
* use case in which references are taken and released one at a time. In these
@@ -190,7 +193,12 @@ bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
- return !new;
+ if (!new) {
+ smp_acquire__after_ctrl_dep();
+ return true;
+ }
+ return false;
+
}
EXPORT_SYMBOL(refcount_sub_and_test_checked);
@@ -202,8 +210,8 @@ EXPORT_SYMBOL(refcount_sub_and_test_checked);
* decrement when saturated at UINT_MAX.
*
* Provides release memory ordering, such that prior loads and stores are done
- * before, and provides a control dependency such that free() must come after.
- * See the comment on top.
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
*
* Return: true if the resulting refcount is 0, false otherwise
*/
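To recap the pattern this acquire-on-success ordering protects (a hand-written example, not from the patch; struct foo and foo_put() are made-up names): the CPU that drops the last reference must observe every other CPU's earlier stores to the object before it frees it.

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t ref;
	int payload;
};

static void foo_put(struct foo *foo)
{
	/*
	 * The release on the decrement publishes our own prior stores to
	 * foo->payload; the acquire taken on the final decrement makes the
	 * stores published by other CPUs visible before kfree() runs.
	 */
	if (refcount_dec_and_test(&foo->ref))
		kfree(foo);
}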
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 30526afa8343..97f59abc3e92 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -416,8 +416,12 @@ static void rht_deferred_worker(struct work_struct *work)
else if (tbl->nest)
err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
- if (!err)
- err = rhashtable_rehash_table(ht);
+ if (!err || err == -EEXIST) {
+ int nerr;
+
+ nerr = rhashtable_rehash_table(ht);
+ err = err ?: nerr;
+ }
mutex_unlock(&ht->mutex);
@@ -682,7 +686,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
* rhashtable_walk_exit - Free an iterator
* @iter: Hash table Iterator
*
- * This function frees resources allocated by rhashtable_walk_init.
+ * This function frees resources allocated by rhashtable_walk_enter.
*/
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
@@ -1179,8 +1183,7 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
unsigned int hash)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
- static struct rhash_head __rcu *rhnull =
- (struct rhash_head __rcu *)NULLS_MARKER(0);
+ static struct rhash_head __rcu *rhnull;
unsigned int index = hash & ((1 << tbl->nest) - 1);
unsigned int size = tbl->size >> tbl->nest;
unsigned int subhash = hash;
@@ -1198,8 +1201,11 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
subhash >>= shift;
}
- if (!ntbl)
+ if (!ntbl) {
+ if (!rhnull)
+ INIT_RHT_NULLS_HEAD(rhnull);
return &rhnull;
+ }
return &ntbl[subhash].bucket;
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index fdd1b8aa8ac6..155fe38756ec 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -20,6 +20,40 @@
#include <linux/sbitmap.h>
#include <linux/seq_file.h>
+/*
+ * See if we have deferred clears that we can batch move
+ */
+static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
+{
+ unsigned long mask, val;
+ bool ret = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sb->map[index].swap_lock, flags);
+
+ if (!sb->map[index].cleared)
+ goto out_unlock;
+
+ /*
+ * First get a stable cleared mask, setting the old mask to 0.
+ */
+ do {
+ mask = sb->map[index].cleared;
+ } while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);
+
+ /*
+ * Now clear the masked bits in our free word
+ */
+ do {
+ val = sb->map[index].word;
+ } while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);
+
+ ret = true;
+out_unlock:
+ spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
+ return ret;
+}
+
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
gfp_t flags, int node)
{
@@ -59,6 +93,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
for (i = 0; i < sb->map_nr; i++) {
sb->map[i].depth = min(depth, bits_per_word);
depth -= sb->map[i].depth;
+ spin_lock_init(&sb->map[i].swap_lock);
}
return 0;
}
@@ -69,6 +104,9 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
unsigned int bits_per_word = 1U << sb->shift;
unsigned int i;
+ for (i = 0; i < sb->map_nr; i++)
+ sbitmap_deferred_clear(sb, i);
+
sb->depth = depth;
sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
@@ -111,6 +149,24 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
return nr;
}
+static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
+ unsigned int alloc_hint, bool round_robin)
+{
+ int nr;
+
+ do {
+ nr = __sbitmap_get_word(&sb->map[index].word,
+ sb->map[index].depth, alloc_hint,
+ !round_robin);
+ if (nr != -1)
+ break;
+ if (!sbitmap_deferred_clear(sb, index))
+ break;
+ } while (1);
+
+ return nr;
+}
+
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
unsigned int i, index;
@@ -118,24 +174,28 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
index = SB_NR_TO_INDEX(sb, alloc_hint);
+ /*
+ * Unless we're doing round robin tag allocation, just use the
+ * alloc_hint to find the right word index. No point in looping
+ * twice in find_next_zero_bit() for that case.
+ */
+ if (round_robin)
+ alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
+ else
+ alloc_hint = 0;
+
for (i = 0; i < sb->map_nr; i++) {
- nr = __sbitmap_get_word(&sb->map[index].word,
- sb->map[index].depth,
- SB_NR_TO_BIT(sb, alloc_hint),
- !round_robin);
+ nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
+ round_robin);
if (nr != -1) {
nr += index << sb->shift;
break;
}
/* Jump to next index. */
- index++;
- alloc_hint = index << sb->shift;
-
- if (index >= sb->map_nr) {
+ alloc_hint = 0;
+ if (++index >= sb->map_nr)
index = 0;
- alloc_hint = 0;
- }
}
return nr;
@@ -151,6 +211,7 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
index = SB_NR_TO_INDEX(sb, alloc_hint);
for (i = 0; i < sb->map_nr; i++) {
+again:
nr = __sbitmap_get_word(&sb->map[index].word,
min(sb->map[index].depth, shallow_depth),
SB_NR_TO_BIT(sb, alloc_hint), true);
@@ -159,6 +220,9 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
break;
}
+ if (sbitmap_deferred_clear(sb, index))
+ goto again;
+
/* Jump to next index. */
index++;
alloc_hint = index << sb->shift;
@@ -178,7 +242,7 @@ bool sbitmap_any_bit_set(const struct sbitmap *sb)
unsigned int i;
for (i = 0; i < sb->map_nr; i++) {
- if (sb->map[i].word)
+ if (sb->map[i].word & ~sb->map[i].cleared)
return true;
}
return false;
@@ -191,9 +255,10 @@ bool sbitmap_any_bit_clear(const struct sbitmap *sb)
for (i = 0; i < sb->map_nr; i++) {
const struct sbitmap_word *word = &sb->map[i];
+ unsigned long mask = word->word & ~word->cleared;
unsigned long ret;
- ret = find_first_zero_bit(&word->word, word->depth);
+ ret = find_first_zero_bit(&mask, word->depth);
if (ret < word->depth)
return true;
}
@@ -201,23 +266,36 @@ bool sbitmap_any_bit_clear(const struct sbitmap *sb)
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);
-unsigned int sbitmap_weight(const struct sbitmap *sb)
+static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
unsigned int i, weight = 0;
for (i = 0; i < sb->map_nr; i++) {
const struct sbitmap_word *word = &sb->map[i];
- weight += bitmap_weight(&word->word, word->depth);
+ if (set)
+ weight += bitmap_weight(&word->word, word->depth);
+ else
+ weight += bitmap_weight(&word->cleared, word->depth);
}
return weight;
}
-EXPORT_SYMBOL_GPL(sbitmap_weight);
+
+static unsigned int sbitmap_weight(const struct sbitmap *sb)
+{
+ return __sbitmap_weight(sb, true);
+}
+
+static unsigned int sbitmap_cleared(const struct sbitmap *sb)
+{
+ return __sbitmap_weight(sb, false);
+}
void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
seq_printf(m, "depth=%u\n", sb->depth);
- seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
+ seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
+ seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
@@ -325,6 +403,7 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
sbq->min_shallow_depth = UINT_MAX;
sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
atomic_set(&sbq->wake_index, 0);
+ atomic_set(&sbq->ws_active, 0);
sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
if (!sbq->ws) {
@@ -440,6 +519,9 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
int i, wake_index;
+ if (!atomic_read(&sbq->ws_active))
+ return NULL;
+
wake_index = atomic_read(&sbq->wake_index);
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
struct sbq_wait_state *ws = &sbq->ws[wake_index];
@@ -509,7 +591,19 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu)
{
- sbitmap_clear_bit_unlock(&sbq->sb, nr);
+ /*
+ * Once the clear bit is set, the bit may be allocated out.
+ *
+	 * Orders READ/WRITE on the associated instance (such as the request
+	 * of blk_mq) by this bit for avoiding a race with re-allocation,
+	 * and its pair is the memory barrier implied in __sbitmap_get_word.
+ *
+ * One invariant is that the clear bit has to be zero when the bit
+ * is in use.
+ */
+ smp_mb__before_atomic();
+ sbitmap_deferred_clear_bit(&sbq->sb, nr);
+
/*
* Pairs with the memory barrier in set_current_state() to ensure the
* proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
@@ -564,6 +658,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
+ seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));
seq_puts(m, "ws={\n");
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
@@ -579,3 +674,48 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);
+
+void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
+ struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait)
+{
+ if (!sbq_wait->sbq) {
+ sbq_wait->sbq = sbq;
+ atomic_inc(&sbq->ws_active);
+ }
+ add_wait_queue(&ws->wait, &sbq_wait->wait);
+}
+EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
+
+void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
+{
+ list_del_init(&sbq_wait->wait.entry);
+ if (sbq_wait->sbq) {
+ atomic_dec(&sbq_wait->sbq->ws_active);
+ sbq_wait->sbq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);
+
+void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
+ struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait, int state)
+{
+ if (!sbq_wait->sbq) {
+ atomic_inc(&sbq->ws_active);
+ sbq_wait->sbq = sbq;
+ }
+ prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
+}
+EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);
+
+void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait)
+{
+ finish_wait(&ws->wait, &sbq_wait->wait);
+ if (sbq_wait->sbq) {
+ atomic_dec(&sbq->ws_active);
+ sbq_wait->sbq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
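A rough sketch of how a caller is expected to pair the new wait helpers so the ws_active accounting stays balanced (not part of the patch; it assumes the DEFINE_SBQ_WAIT() initializer and the sbq_wait_ptr() helper from include/linux/sbitmap.h, and my_get_tag() is a made-up consumer along the lines of the blk-mq tag allocator):

#include <linux/sbitmap.h>
#include <linux/sched.h>

static int my_get_tag(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws = sbq_wait_ptr(sbq, &sbq->wake_index);
	DEFINE_SBQ_WAIT(wait);
	int tag;

	do {
		/* The first call bumps sbq->ws_active, so sbq_wake_ptr()
		 * knows there is at least one waiter to look for.
		 */
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
		tag = __sbitmap_queue_get(sbq);
		if (tag != -1)
			break;
		io_schedule();
	} while (1);

	/* Drops ws_active again once we are no longer waiting. */
	sbitmap_finish_wait(sbq, ws, &wait);
	return tag;
}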
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 7c6096a71704..739dc9fe2c55 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -271,7 +271,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
if (nents == 0)
return -EINVAL;
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
if (WARN_ON_ONCE(nents > max_ents))
return -EINVAL;
#endif
@@ -625,6 +625,32 @@ bool __sg_page_iter_next(struct sg_page_iter *piter)
}
EXPORT_SYMBOL(__sg_page_iter_next);
+static int sg_dma_page_count(struct scatterlist *sg)
+{
+ return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
+}
+
+bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
+{
+ struct sg_page_iter *piter = &dma_iter->base;
+
+ if (!piter->__nents || !piter->sg)
+ return false;
+
+ piter->sg_pgoffset += piter->__pg_advance;
+ piter->__pg_advance = 1;
+
+ while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
+ piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
+ piter->sg = sg_next(piter->sg);
+ if (!--piter->__nents || !piter->sg)
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(__sg_page_iter_dma_next);
+
/**
* sg_miter_start - start mapping iteration over a sg list
* @miter: sg mapping iter to be started
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 11f2ae0f9099..bd807f545a9d 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -140,13 +140,17 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
*/
int seq_buf_puts(struct seq_buf *s, const char *str)
{
- unsigned int len = strlen(str);
+ size_t len = strlen(str);
WARN_ON(s->size == 0);
+ /* Add 1 to len for the trailing null byte which must be there */
+ len += 1;
+
if (seq_buf_can_fit(s, len)) {
memcpy(s->buffer + s->len, str, len);
- s->len += len;
+ /* Don't count the trailing null byte against the capacity */
+ s->len += len - 1;
return 0;
}
seq_buf_set_overflow(s);
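A small hypothetical illustration of the new accounting (the 8-byte buffer and the strings are arbitrary): the terminating NUL now has to fit in the buffer, but it does not count toward the used length.

#include <linux/seq_buf.h>

static void seq_buf_puts_demo(void)
{
	unsigned char buf[8];
	struct seq_buf s;

	seq_buf_init(&s, buf, sizeof(buf));

	/* Needs 7 + 1 bytes: the '\0' is copied, but s.len becomes only 7. */
	seq_buf_puts(&s, "1234567");

	/* Only one byte is left; "8" needs 1 + 1 bytes, so this marks the
	 * buffer as overflowed instead of appending an unterminated byte.
	 */
	seq_buf_puts(&s, "8");
}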
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 0beaa1d899aa..6a042f53e7bb 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -18,22 +18,19 @@ void show_mem(unsigned int filter, nodemask_t *nodemask)
show_free_areas(filter, nodemask);
for_each_online_pgdat(pgdat) {
- unsigned long flags;
int zoneid;
- pgdat_resize_lock(pgdat, &flags);
for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
struct zone *zone = &pgdat->node_zones[zoneid];
if (!populated_zone(zone))
continue;
total += zone->present_pages;
- reserved += zone->present_pages - zone->managed_pages;
+ reserved += zone->present_pages - zone_managed_pages(zone);
if (is_highmem_idx(zoneid))
highmem += zone->present_pages;
}
- pgdat_resize_unlock(pgdat, &flags);
}
printk("%lu pages RAM\n", total);
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 85925aaa4fff..157d9e31f6c2 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -5,10 +5,11 @@
* DEBUG_PREEMPT variant of smp_processor_id().
*/
#include <linux/export.h>
+#include <linux/kprobes.h>
#include <linux/sched.h>
-notrace static unsigned int check_preemption_disabled(const char *what1,
- const char *what2)
+notrace static nokprobe_inline
+unsigned int check_preemption_disabled(const char *what1, const char *what2)
{
int this_cpu = raw_smp_processor_id();
@@ -56,9 +57,11 @@ notrace unsigned int debug_smp_processor_id(void)
return check_preemption_disabled("smp_processor_id", "");
}
EXPORT_SYMBOL(debug_smp_processor_id);
+NOKPROBE_SYMBOL(debug_smp_processor_id);
notrace void __this_cpu_preempt_check(const char *op)
{
check_preemption_disabled("__this_cpu_", op);
}
EXPORT_SYMBOL(__this_cpu_preempt_check);
+NOKPROBE_SYMBOL(__this_cpu_preempt_check);
diff --git a/lib/string.c b/lib/string.c
index 38e4ca08e757..3ab861c1a857 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -866,6 +866,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
EXPORT_SYMBOL(memcmp);
#endif
+#ifndef __HAVE_ARCH_BCMP
+/**
+ * bcmp - returns 0 if and only if the buffers have identical contents.
+ * @a: pointer to first buffer.
+ * @b: pointer to second buffer.
+ * @len: size of buffers.
+ *
+ * The sign or magnitude of a non-zero return value has no particular
+ * meaning, and architectures may implement their own more efficient bcmp(). So
+ * while this particular implementation is a simple (tail) call to memcmp, do
+ * not rely on anything but whether the return value is zero or non-zero.
+ */
+#undef bcmp
+int bcmp(const void *a, const void *b, size_t len)
+{
+ return memcmp(a, b, len);
+}
+EXPORT_SYMBOL(bcmp);
+#endif
+
#ifndef __HAVE_ARCH_MEMSCAN
/**
* memscan - Find a character in an area of memory.
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index b53e1b5d80f4..58eacd41526c 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -114,10 +114,11 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
kasan_check_write(dst, count);
check_object_size(dst, count, false);
- user_access_begin();
- retval = do_strncpy_from_user(dst, src, count, max);
- user_access_end();
- return retval;
+ if (user_access_begin(src, max)) {
+ retval = do_strncpy_from_user(dst, src, count, max);
+ user_access_end();
+ return retval;
+ }
}
return -EFAULT;
}
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 60d0bbda8f5e..1c1a1b0e38a5 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -114,10 +114,11 @@ long strnlen_user(const char __user *str, long count)
unsigned long max = max_addr - src_addr;
long retval;
- user_access_begin();
- retval = do_strnlen_user(str, count, max);
- user_access_end();
- return retval;
+ if (user_access_begin(str, max)) {
+ retval = do_strnlen_user(str, count, max);
+ user_access_end();
+ return retval;
+ }
}
return 0;
}
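Both conversions above follow the same pattern: user_access_begin() now takes the pointer and length so it can check the range itself, returning false on failure. A minimal hypothetical caller (my_get_user_pair() is a made-up name) might look like this:

#include <linux/types.h>
#include <linux/uaccess.h>

static int my_get_user_pair(u32 __user *uptr, u32 *a, u32 *b)
{
	/* Validates the whole range up front; unsafe accesses may follow. */
	if (!user_access_begin(uptr, 2 * sizeof(u32)))
		return -EFAULT;
	unsafe_get_user(*a, &uptr[0], efault);
	unsafe_get_user(*b, &uptr[1], efault);
	user_access_end();
	return 0;

efault:
	user_access_end();
	return -EFAULT;
}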
diff --git a/lib/syscall.c b/lib/syscall.c
index 1a7077f20eae..fb328e7ccb08 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -5,16 +5,14 @@
#include <linux/export.h>
#include <asm/syscall.h>
-static int collect_syscall(struct task_struct *target, long *callno,
- unsigned long args[6], unsigned int maxargs,
- unsigned long *sp, unsigned long *pc)
+static int collect_syscall(struct task_struct *target, struct syscall_info *info)
{
struct pt_regs *regs;
if (!try_get_task_stack(target)) {
/* Task has no stack, so the task isn't in a syscall. */
- *sp = *pc = 0;
- *callno = -1;
+ memset(info, 0, sizeof(*info));
+ info->data.nr = -1;
return 0;
}
@@ -24,12 +22,13 @@ static int collect_syscall(struct task_struct *target, long *callno,
return -EAGAIN;
}
- *sp = user_stack_pointer(regs);
- *pc = instruction_pointer(regs);
+ info->sp = user_stack_pointer(regs);
+ info->data.instruction_pointer = instruction_pointer(regs);
- *callno = syscall_get_nr(target, regs);
- if (*callno != -1L && maxargs > 0)
- syscall_get_arguments(target, regs, 0, maxargs, args);
+ info->data.nr = syscall_get_nr(target, regs);
+ if (info->data.nr != -1L)
+ syscall_get_arguments(target, regs,
+ (unsigned long *)&info->data.args[0]);
put_task_stack(target);
return 0;
@@ -38,41 +37,35 @@ static int collect_syscall(struct task_struct *target, long *callno,
/**
* task_current_syscall - Discover what a blocked task is doing.
* @target: thread to examine
- * @callno: filled with system call number or -1
- * @args: filled with @maxargs system call arguments
- * @maxargs: number of elements in @args to fill
- * @sp: filled with user stack pointer
- * @pc: filled with user PC
+ * @info: structure with the following fields:
+ * .sp - filled with user stack pointer
+ * .data.nr - filled with system call number or -1
+ * .data.args - filled with the system call arguments
+ * .data.instruction_pointer - filled with user PC
*
- * If @target is blocked in a system call, returns zero with *@callno
- * set to the the call's number and @args filled in with its arguments.
- * Registers not used for system call arguments may not be available and
- * it is not kosher to use &struct user_regset calls while the system
+ * If @target is blocked in a system call, returns zero with @info.data.nr
+ * set to the call's number and @info.data.args filled in with its
+ * arguments. Registers not used for system call arguments may not be available
+ * and it is not kosher to use &struct user_regset calls while the system
* call is still in progress. Note we may get this result if @target
* has finished its system call but not yet returned to user mode, such
* as when it's stopped for signal handling or syscall exit tracing.
*
* If @target is blocked in the kernel during a fault or exception,
- * returns zero with *@callno set to -1 and does not fill in @args.
- * If so, it's now safe to examine @target using &struct user_regset
- * get() calls as long as we're sure @target won't return to user mode.
+ * returns zero with @info.data.nr set to -1 and does not fill in
+ * @info.data.args. If so, it's now safe to examine @target using
+ * &struct user_regset get() calls as long as we're sure @target won't return
+ * to user mode.
*
* Returns -%EAGAIN if @target does not remain blocked.
- *
- * Returns -%EINVAL if @maxargs is too large (maximum is six).
*/
-int task_current_syscall(struct task_struct *target, long *callno,
- unsigned long args[6], unsigned int maxargs,
- unsigned long *sp, unsigned long *pc)
+int task_current_syscall(struct task_struct *target, struct syscall_info *info)
{
long state;
unsigned long ncsw;
- if (unlikely(maxargs > 6))
- return -EINVAL;
-
if (target == current)
- return collect_syscall(target, callno, args, maxargs, sp, pc);
+ return collect_syscall(target, info);
state = target->state;
if (unlikely(!state))
@@ -80,7 +73,7 @@ int task_current_syscall(struct task_struct *target, long *callno,
ncsw = wait_task_inactive(target, state);
if (unlikely(!ncsw) ||
- unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
+ unlikely(collect_syscall(target, info)) ||
unlikely(wait_task_inactive(target, state) != ncsw))
return -EAGAIN;
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index aa22bcaec1dc..0845f635f404 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -39,6 +39,7 @@
#define SKB_HASH 0x1234aaab
#define SKB_QUEUE_MAP 123
#define SKB_VLAN_TCI 0xffff
+#define SKB_VLAN_PRESENT 1
#define SKB_DEV_IFINDEX 577
#define SKB_DEV_TYPE 588
@@ -725,8 +726,8 @@ static struct bpf_test tests[] = {
CLASSIC,
{ },
{
- { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT },
- { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }
+ { 1, SKB_VLAN_TCI },
+ { 10, SKB_VLAN_TCI }
},
},
{
@@ -739,8 +740,8 @@ static struct bpf_test tests[] = {
CLASSIC,
{ },
{
- { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
- { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+ { 1, SKB_VLAN_PRESENT },
+ { 10, SKB_VLAN_PRESENT }
},
},
{
@@ -5289,8 +5290,8 @@ static struct bpf_test tests[] = {
#endif
{ },
{
- { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
- { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+ { 1, SKB_VLAN_PRESENT },
+ { 10, SKB_VLAN_PRESENT }
},
.fill_helper = bpf_fill_maxinsns6,
.expected_errcode = -ENOTSUPP,
@@ -6493,6 +6494,7 @@ static struct sk_buff *populate_skb(char *buf, int size)
skb->hash = SKB_HASH;
skb->queue_mapping = SKB_QUEUE_MAP;
skb->vlan_tci = SKB_VLAN_TCI;
+ skb->vlan_present = SKB_VLAN_PRESENT;
skb->vlan_proto = htons(ETH_P_IP);
dev_net_set(&dev, &init_net);
skb->dev = &dev;
@@ -6666,12 +6668,14 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
u64 start, finish;
int ret = 0, i;
+ preempt_disable();
start = ktime_get_ns();
for (i = 0; i < runs; i++)
ret = BPF_PROG_RUN(fp, data);
finish = ktime_get_ns();
+ preempt_enable();
*duration = finish - start;
do_div(*duration, runs);
diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
index d5a06addeb27..bf864c73e462 100644
--- a/lib/test_debug_virtual.c
+++ b/lib/test_debug_virtual.c
@@ -5,6 +5,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sizes.h>
+#include <linux/io.h>
#include <asm/page.h>
#ifdef CONFIG_MIPS
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 7cab9a9869ac..7222093ee00b 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -631,11 +631,6 @@ static ssize_t trigger_batched_requests_store(struct device *dev,
for (i = 0; i < test_fw_config->num_requests; i++) {
req = &test_fw_config->reqs[i];
- if (!req) {
- WARN_ON(1);
- rc = -ENOMEM;
- goto out_bail;
- }
req->fw = NULL;
req->idx = i;
req->name = test_fw_config->name;
@@ -737,10 +732,6 @@ ssize_t trigger_batched_requests_async_store(struct device *dev,
for (i = 0; i < test_fw_config->num_requests; i++) {
req = &test_fw_config->reqs[i];
- if (!req) {
- WARN_ON(1);
- goto out_bail;
- }
req->name = test_fw_config->name;
req->fw = NULL;
req->idx = i;
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 51b78405bf24..7de2702621dc 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -480,29 +480,6 @@ static noinline void __init copy_user_test(void)
kfree(kmem);
}
-static noinline void __init use_after_scope_test(void)
-{
- volatile char *volatile p;
-
- pr_info("use-after-scope on int\n");
- {
- int local = 0;
-
- p = (char *)&local;
- }
- p[0] = 1;
- p[3] = 1;
-
- pr_info("use-after-scope on array\n");
- {
- char local[1024] = {0};
-
- p = local;
- }
- p[0] = 1;
- p[1023] = 1;
-}
-
static noinline void __init kasan_alloca_oob_left(void)
{
volatile int i = 10;
@@ -682,7 +659,6 @@ static int __init kmalloc_tests_init(void)
kasan_alloca_oob_right();
ksize_unpoisons_memory();
copy_user_test();
- use_after_scope_test();
kmem_cache_double_free();
kmem_cache_invalid_free();
kasan_memchr();
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index d82d022111e0..9cf77628fc91 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -632,7 +632,7 @@ static void __kmod_config_free(struct test_config *config)
config->test_driver = NULL;
kfree_const(config->test_fs);
- config->test_driver = NULL;
+ config->test_fs = NULL;
}
static void kmod_config_free(struct kmod_test_device *test_dev)
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
new file mode 100644
index 000000000000..72c1abfa154d
--- /dev/null
+++ b/lib/test_objagg.c
@@ -0,0 +1,1021 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/objagg.h>
+
+struct tokey {
+ unsigned int id;
+};
+
+#define NUM_KEYS 32
+
+static int key_id_index(unsigned int key_id)
+{
+ if (key_id >= NUM_KEYS) {
+ WARN_ON(1);
+ return 0;
+ }
+ return key_id;
+}
+
+#define BUF_LEN 128
+
+struct world {
+ unsigned int root_count;
+ unsigned int delta_count;
+ char next_root_buf[BUF_LEN];
+ struct objagg_obj *objagg_objs[NUM_KEYS];
+ unsigned int key_refs[NUM_KEYS];
+};
+
+struct root {
+ struct tokey key;
+ char buf[BUF_LEN];
+};
+
+struct delta {
+ unsigned int key_id_diff;
+};
+
+static struct objagg_obj *world_obj_get(struct world *world,
+ struct objagg *objagg,
+ unsigned int key_id)
+{
+ struct objagg_obj *objagg_obj;
+ struct tokey key;
+ int err;
+
+ key.id = key_id;
+ objagg_obj = objagg_obj_get(objagg, &key);
+ if (IS_ERR(objagg_obj)) {
+ pr_err("Key %u: Failed to get object.\n", key_id);
+ return objagg_obj;
+ }
+ if (!world->key_refs[key_id_index(key_id)]) {
+ world->objagg_objs[key_id_index(key_id)] = objagg_obj;
+ } else if (world->objagg_objs[key_id_index(key_id)] != objagg_obj) {
+		pr_err("Key %u: Got another object for the same key.\n",
+ key_id);
+ err = -EINVAL;
+ goto err_key_id_check;
+ }
+ world->key_refs[key_id_index(key_id)]++;
+ return objagg_obj;
+
+err_key_id_check:
+ objagg_obj_put(objagg, objagg_obj);
+ return ERR_PTR(err);
+}
+
+static void world_obj_put(struct world *world, struct objagg *objagg,
+ unsigned int key_id)
+{
+ struct objagg_obj *objagg_obj;
+
+ if (!world->key_refs[key_id_index(key_id)])
+ return;
+ objagg_obj = world->objagg_objs[key_id_index(key_id)];
+ objagg_obj_put(objagg, objagg_obj);
+ world->key_refs[key_id_index(key_id)]--;
+}
+
+#define MAX_KEY_ID_DIFF 5
+
+static bool delta_check(void *priv, const void *parent_obj, const void *obj)
+{
+ const struct tokey *parent_key = parent_obj;
+ const struct tokey *key = obj;
+ int diff = key->id - parent_key->id;
+
+ return diff >= 0 && diff <= MAX_KEY_ID_DIFF;
+}
+
+static void *delta_create(void *priv, void *parent_obj, void *obj)
+{
+ struct tokey *parent_key = parent_obj;
+ struct world *world = priv;
+ struct tokey *key = obj;
+ int diff = key->id - parent_key->id;
+ struct delta *delta;
+
+ if (!delta_check(priv, parent_obj, obj))
+ return ERR_PTR(-EINVAL);
+
+ delta = kzalloc(sizeof(*delta), GFP_KERNEL);
+ if (!delta)
+ return ERR_PTR(-ENOMEM);
+ delta->key_id_diff = diff;
+ world->delta_count++;
+ return delta;
+}
+
+static void delta_destroy(void *priv, void *delta_priv)
+{
+ struct delta *delta = delta_priv;
+ struct world *world = priv;
+
+ world->delta_count--;
+ kfree(delta);
+}
+
+static void *root_create(void *priv, void *obj, unsigned int id)
+{
+ struct world *world = priv;
+ struct tokey *key = obj;
+ struct root *root;
+
+ root = kzalloc(sizeof(*root), GFP_KERNEL);
+ if (!root)
+ return ERR_PTR(-ENOMEM);
+ memcpy(&root->key, key, sizeof(root->key));
+ memcpy(root->buf, world->next_root_buf, sizeof(root->buf));
+ world->root_count++;
+ return root;
+}
+
+static void root_destroy(void *priv, void *root_priv)
+{
+ struct root *root = root_priv;
+ struct world *world = priv;
+
+ world->root_count--;
+ kfree(root);
+}
+
+static int test_nodelta_obj_get(struct world *world, struct objagg *objagg,
+ unsigned int key_id, bool should_create_root)
+{
+ unsigned int orig_root_count = world->root_count;
+ struct objagg_obj *objagg_obj;
+ const struct root *root;
+ int err;
+
+ if (should_create_root)
+ prandom_bytes(world->next_root_buf,
+ sizeof(world->next_root_buf));
+
+ objagg_obj = world_obj_get(world, objagg, key_id);
+ if (IS_ERR(objagg_obj)) {
+ pr_err("Key %u: Failed to get object.\n", key_id);
+ return PTR_ERR(objagg_obj);
+ }
+ if (should_create_root) {
+ if (world->root_count != orig_root_count + 1) {
+ pr_err("Key %u: Root was not created\n", key_id);
+ err = -EINVAL;
+ goto err_check_root_count;
+ }
+ } else {
+ if (world->root_count != orig_root_count) {
+ pr_err("Key %u: Root was incorrectly created\n",
+ key_id);
+ err = -EINVAL;
+ goto err_check_root_count;
+ }
+ }
+ root = objagg_obj_root_priv(objagg_obj);
+ if (root->key.id != key_id) {
+ pr_err("Key %u: Root has unexpected key id\n", key_id);
+ err = -EINVAL;
+ goto err_check_key_id;
+ }
+ if (should_create_root &&
+ memcmp(world->next_root_buf, root->buf, sizeof(root->buf))) {
+ pr_err("Key %u: Buffer does not match the expected content\n",
+ key_id);
+ err = -EINVAL;
+ goto err_check_buf;
+ }
+ return 0;
+
+err_check_buf:
+err_check_key_id:
+err_check_root_count:
+ objagg_obj_put(objagg, objagg_obj);
+ return err;
+}
+
+static int test_nodelta_obj_put(struct world *world, struct objagg *objagg,
+ unsigned int key_id, bool should_destroy_root)
+{
+ unsigned int orig_root_count = world->root_count;
+
+ world_obj_put(world, objagg, key_id);
+
+ if (should_destroy_root) {
+ if (world->root_count != orig_root_count - 1) {
+ pr_err("Key %u: Root was not destroyed\n", key_id);
+ return -EINVAL;
+ }
+ } else {
+ if (world->root_count != orig_root_count) {
+ pr_err("Key %u: Root was incorrectly destroyed\n",
+ key_id);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int check_stats_zero(struct objagg *objagg)
+{
+ const struct objagg_stats *stats;
+ int err = 0;
+
+ stats = objagg_stats_get(objagg);
+ if (IS_ERR(stats))
+ return PTR_ERR(stats);
+
+ if (stats->stats_info_count != 0) {
+ pr_err("Stats: Object count is not zero while it should be\n");
+ err = -EINVAL;
+ }
+
+ objagg_stats_put(stats);
+ return err;
+}
+
+static int check_stats_nodelta(struct objagg *objagg)
+{
+ const struct objagg_stats *stats;
+ int i;
+ int err;
+
+ stats = objagg_stats_get(objagg);
+ if (IS_ERR(stats))
+ return PTR_ERR(stats);
+
+ if (stats->stats_info_count != NUM_KEYS) {
+ pr_err("Stats: Unexpected object count (%u expected, %u returned)\n",
+ NUM_KEYS, stats->stats_info_count);
+ err = -EINVAL;
+ goto stats_put;
+ }
+
+ for (i = 0; i < stats->stats_info_count; i++) {
+ if (stats->stats_info[i].stats.user_count != 2) {
+ pr_err("Stats: incorrect user count\n");
+ err = -EINVAL;
+ goto stats_put;
+ }
+ if (stats->stats_info[i].stats.delta_user_count != 2) {
+ pr_err("Stats: incorrect delta user count\n");
+ err = -EINVAL;
+ goto stats_put;
+ }
+ }
+ err = 0;
+
+stats_put:
+ objagg_stats_put(stats);
+ return err;
+}
+
+static bool delta_check_dummy(void *priv, const void *parent_obj,
+ const void *obj)
+{
+ return false;
+}
+
+static void *delta_create_dummy(void *priv, void *parent_obj, void *obj)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static void delta_destroy_dummy(void *priv, void *delta_priv)
+{
+}
+
+static const struct objagg_ops nodelta_ops = {
+ .obj_size = sizeof(struct tokey),
+ .delta_check = delta_check_dummy,
+ .delta_create = delta_create_dummy,
+ .delta_destroy = delta_destroy_dummy,
+ .root_create = root_create,
+ .root_destroy = root_destroy,
+};
+
+static int test_nodelta(void)
+{
+ struct world world = {};
+ struct objagg *objagg;
+ int i;
+ int err;
+
+ objagg = objagg_create(&nodelta_ops, NULL, &world);
+ if (IS_ERR(objagg))
+ return PTR_ERR(objagg);
+
+ err = check_stats_zero(objagg);
+ if (err)
+ goto err_stats_first_zero;
+
+ /* First round of gets, the root objects should be created */
+ for (i = 0; i < NUM_KEYS; i++) {
+ err = test_nodelta_obj_get(&world, objagg, i, true);
+ if (err)
+ goto err_obj_first_get;
+ }
+
+ /* Do the second round of gets, all roots are already created,
+ * make sure that no new root is created
+ */
+ for (i = 0; i < NUM_KEYS; i++) {
+ err = test_nodelta_obj_get(&world, objagg, i, false);
+ if (err)
+ goto err_obj_second_get;
+ }
+
+ err = check_stats_nodelta(objagg);
+ if (err)
+ goto err_stats_nodelta;
+
+ for (i = NUM_KEYS - 1; i >= 0; i--) {
+ err = test_nodelta_obj_put(&world, objagg, i, false);
+ if (err)
+ goto err_obj_first_put;
+ }
+ for (i = NUM_KEYS - 1; i >= 0; i--) {
+ err = test_nodelta_obj_put(&world, objagg, i, true);
+ if (err)
+ goto err_obj_second_put;
+ }
+
+ err = check_stats_zero(objagg);
+ if (err)
+ goto err_stats_second_zero;
+
+ objagg_destroy(objagg);
+ return 0;
+
+err_stats_nodelta:
+err_obj_first_put:
+err_obj_second_get:
+ for (i--; i >= 0; i--)
+ world_obj_put(&world, objagg, i);
+
+ i = NUM_KEYS;
+err_obj_first_get:
+err_obj_second_put:
+ for (i--; i >= 0; i--)
+ world_obj_put(&world, objagg, i);
+err_stats_first_zero:
+err_stats_second_zero:
+ objagg_destroy(objagg);
+ return err;
+}
+
+static const struct objagg_ops delta_ops = {
+ .obj_size = sizeof(struct tokey),
+ .delta_check = delta_check,
+ .delta_create = delta_create,
+ .delta_destroy = delta_destroy,
+ .root_create = root_create,
+ .root_destroy = root_destroy,
+};
+
+enum action {
+ ACTION_GET,
+ ACTION_PUT,
+};
+
+enum expect_delta {
+ EXPECT_DELTA_SAME,
+ EXPECT_DELTA_INC,
+ EXPECT_DELTA_DEC,
+};
+
+enum expect_root {
+ EXPECT_ROOT_SAME,
+ EXPECT_ROOT_INC,
+ EXPECT_ROOT_DEC,
+};
+
+struct expect_stats_info {
+ struct objagg_obj_stats stats;
+ bool is_root;
+ unsigned int key_id;
+};
+
+struct expect_stats {
+ unsigned int info_count;
+ struct expect_stats_info info[NUM_KEYS];
+};
+
+struct action_item {
+ unsigned int key_id;
+ enum action action;
+ enum expect_delta expect_delta;
+ enum expect_root expect_root;
+ struct expect_stats expect_stats;
+};
+
+#define EXPECT_STATS(count, ...) \
+{ \
+ .info_count = count, \
+ .info = { __VA_ARGS__ } \
+}
+
+#define ROOT(key_id, user_count, delta_user_count) \
+ {{user_count, delta_user_count}, true, key_id}
+
+#define DELTA(key_id, user_count) \
+ {{user_count, user_count}, false, key_id}
+
+static const struct action_item action_items[] = {
+ {
+ 1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+ EXPECT_STATS(1, ROOT(1, 1, 1)),
+ }, /* r: 1 d: */
+ {
+ 7, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+ EXPECT_STATS(2, ROOT(1, 1, 1), ROOT(7, 1, 1)),
+ }, /* r: 1, 7 d: */
+ {
+ 3, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(3, ROOT(1, 1, 2), ROOT(7, 1, 1),
+ DELTA(3, 1)),
+ }, /* r: 1, 7 d: 3^1 */
+ {
+ 5, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(4, ROOT(1, 1, 3), ROOT(7, 1, 1),
+ DELTA(3, 1), DELTA(5, 1)),
+ }, /* r: 1, 7 d: 3^1, 5^1 */
+ {
+ 3, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(4, ROOT(1, 1, 4), ROOT(7, 1, 1),
+ DELTA(3, 2), DELTA(5, 1)),
+ }, /* r: 1, 7 d: 3^1, 3^1, 5^1 */
+ {
+ 1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(4, ROOT(1, 2, 5), ROOT(7, 1, 1),
+ DELTA(3, 2), DELTA(5, 1)),
+ }, /* r: 1, 1, 7 d: 3^1, 3^1, 5^1 */
+ {
+ 30, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+ EXPECT_STATS(5, ROOT(1, 2, 5), ROOT(7, 1, 1), ROOT(30, 1, 1),
+ DELTA(3, 2), DELTA(5, 1)),
+ }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1 */
+ {
+ 8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 2), ROOT(30, 1, 1),
+ DELTA(3, 2), DELTA(5, 1), DELTA(8, 1)),
+ }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7 */
+ {
+ 8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 3), ROOT(30, 1, 1),
+ DELTA(3, 2), DELTA(8, 2), DELTA(5, 1)),
+ }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7, 8^7 */
+ {
+ 3, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(6, ROOT(1, 2, 4), ROOT(7, 1, 3), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(3, 1), DELTA(5, 1)),
+ }, /* r: 1, 1, 7, 30 d: 3^1, 5^1, 8^7, 8^7 */
+ {
+ 3, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(1, 2, 3), ROOT(7, 1, 3), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(5, 1)),
+ }, /* r: 1, 1, 7, 30 d: 5^1, 8^7, 8^7 */
+ {
+ 1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(1, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(5, 1)),
+ }, /* r: 1, 7, 30 d: 5^1, 8^7, 8^7 */
+ {
+ 1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(1, 0, 1),
+ DELTA(8, 2), DELTA(5, 1)),
+ }, /* r: 7, 30 d: 5^1, 8^7, 8^7 */
+ {
+ 5, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
+ EXPECT_STATS(3, ROOT(7, 1, 3), ROOT(30, 1, 1),
+ DELTA(8, 2)),
+ }, /* r: 7, 30 d: 8^7, 8^7 */
+ {
+ 5, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+ EXPECT_STATS(4, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(5, 1, 1),
+ DELTA(8, 2)),
+ }, /* r: 7, 30, 5 d: 8^7, 8^7 */
+ {
+ 6, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */
+ {
+ 8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 4), ROOT(5, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 3), DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 8^7, 8^7, 8^7, 6^5 */
+ {
+ 8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */
+ {
+ 8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 2), ROOT(5, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 1), DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 8^7, 6^5 */
+ {
+ 8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(4, ROOT(5, 1, 2), ROOT(7, 1, 1), ROOT(30, 1, 1),
+ DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 6^5 */
+ {
+ 8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(5, 1, 3), ROOT(7, 1, 1), ROOT(30, 1, 1),
+ DELTA(6, 1), DELTA(8, 1)),
+ }, /* r: 7, 30, 5 d: 6^5, 8^5 */
+ {
+ 7, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
+ EXPECT_STATS(4, ROOT(5, 1, 3), ROOT(30, 1, 1),
+ DELTA(6, 1), DELTA(8, 1)),
+ }, /* r: 30, 5 d: 6^5, 8^5 */
+ {
+ 30, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
+ EXPECT_STATS(3, ROOT(5, 1, 3),
+ DELTA(6, 1), DELTA(8, 1)),
+ }, /* r: 5 d: 6^5, 8^5 */
+ {
+ 5, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(3, ROOT(5, 0, 2),
+ DELTA(6, 1), DELTA(8, 1)),
+ }, /* r: d: 6^5, 8^5 */
+ {
+ 6, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(2, ROOT(5, 0, 1),
+ DELTA(8, 1)),
+ }, /* r: d: 6^5 */
+ {
+ 8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
+ EXPECT_STATS(0, ),
+ }, /* r: d: */
+};
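+
+/* test_delta() below walks action_items[] in order, checking the expected
+ * delta/root counters and stats after each step; on failure it replays the
+ * already-executed items with the inverse action to unwind the world state.
+ */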
+
+static int check_expect(struct world *world,
+ const struct action_item *action_item,
+ unsigned int orig_delta_count,
+ unsigned int orig_root_count)
+{
+ unsigned int key_id = action_item->key_id;
+
+ switch (action_item->expect_delta) {
+ case EXPECT_DELTA_SAME:
+ if (orig_delta_count != world->delta_count) {
+ pr_err("Key %u: Delta count changed while expected to remain the same.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ case EXPECT_DELTA_INC:
+ if (WARN_ON(action_item->action == ACTION_PUT))
+ return -EINVAL;
+ if (orig_delta_count + 1 != world->delta_count) {
+ pr_err("Key %u: Delta count was not incremented.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ case EXPECT_DELTA_DEC:
+ if (WARN_ON(action_item->action == ACTION_GET))
+ return -EINVAL;
+ if (orig_delta_count - 1 != world->delta_count) {
+ pr_err("Key %u: Delta count was not decremented.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ }
+
+ switch (action_item->expect_root) {
+ case EXPECT_ROOT_SAME:
+ if (orig_root_count != world->root_count) {
+ pr_err("Key %u: Root count changed while expected to remain the same.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ case EXPECT_ROOT_INC:
+ if (WARN_ON(action_item->action == ACTION_PUT))
+ return -EINVAL;
+ if (orig_root_count + 1 != world->root_count) {
+ pr_err("Key %u: Root count was not incremented.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ case EXPECT_ROOT_DEC:
+ if (WARN_ON(action_item->action == ACTION_GET))
+ return -EINVAL;
+ if (orig_root_count - 1 != world->root_count) {
+ pr_err("Key %u: Root count was not decremented.\n",
+ key_id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static unsigned int obj_to_key_id(struct objagg_obj *objagg_obj)
+{
+ const struct tokey *root_key;
+ const struct delta *delta;
+ unsigned int key_id;
+
+ root_key = objagg_obj_root_priv(objagg_obj);
+ key_id = root_key->id;
+ delta = objagg_obj_delta_priv(objagg_obj);
+ if (delta)
+ key_id += delta->key_id_diff;
+ return key_id;
+}
+
+static int
+check_expect_stats_nums(const struct objagg_obj_stats_info *stats_info,
+ const struct expect_stats_info *expect_stats_info,
+ const char **errmsg)
+{
+ if (stats_info->is_root != expect_stats_info->is_root) {
+ if (errmsg)
+ *errmsg = "Incorrect root/delta indication";
+ return -EINVAL;
+ }
+ if (stats_info->stats.user_count !=
+ expect_stats_info->stats.user_count) {
+ if (errmsg)
+ *errmsg = "Incorrect user count";
+ return -EINVAL;
+ }
+ if (stats_info->stats.delta_user_count !=
+ expect_stats_info->stats.delta_user_count) {
+ if (errmsg)
+ *errmsg = "Incorrect delta user count";
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+check_expect_stats_key_id(const struct objagg_obj_stats_info *stats_info,
+ const struct expect_stats_info *expect_stats_info,
+ const char **errmsg)
+{
+ if (obj_to_key_id(stats_info->objagg_obj) !=
+ expect_stats_info->key_id) {
+ if (errmsg)
+ *errmsg = "incorrect key id";
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int check_expect_stats_neigh(const struct objagg_stats *stats,
+ const struct expect_stats *expect_stats,
+ int pos)
+{
+ int i;
+ int err;
+
+ for (i = pos - 1; i >= 0; i--) {
+ err = check_expect_stats_nums(&stats->stats_info[i],
+ &expect_stats->info[pos], NULL);
+ if (err)
+ break;
+ err = check_expect_stats_key_id(&stats->stats_info[i],
+ &expect_stats->info[pos], NULL);
+ if (!err)
+ return 0;
+ }
+ for (i = pos + 1; i < stats->stats_info_count; i++) {
+ err = check_expect_stats_nums(&stats->stats_info[i],
+ &expect_stats->info[pos], NULL);
+ if (err)
+ break;
+ err = check_expect_stats_key_id(&stats->stats_info[i],
+ &expect_stats->info[pos], NULL);
+ if (!err)
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int __check_expect_stats(const struct objagg_stats *stats,
+ const struct expect_stats *expect_stats,
+ const char **errmsg)
+{
+ int i;
+ int err;
+
+ if (stats->stats_info_count != expect_stats->info_count) {
+ *errmsg = "Unexpected object count";
+ return -EINVAL;
+ }
+
+ for (i = 0; i < stats->stats_info_count; i++) {
+ err = check_expect_stats_nums(&stats->stats_info[i],
+ &expect_stats->info[i], errmsg);
+ if (err)
+ return err;
+ err = check_expect_stats_key_id(&stats->stats_info[i],
+ &expect_stats->info[i], errmsg);
+ if (err) {
+ /* It is possible that one of the neighboring stats with the
+ * same numbers has the correct key id, so check those as well.
+ */
+ err = check_expect_stats_neigh(stats, expect_stats, i);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int check_expect_stats(struct objagg *objagg,
+ const struct expect_stats *expect_stats,
+ const char **errmsg)
+{
+ const struct objagg_stats *stats;
+ int err;
+
+ stats = objagg_stats_get(objagg);
+ if (IS_ERR(stats)) {
+ *errmsg = "objagg_stats_get() failed.";
+ return PTR_ERR(stats);
+ }
+ err = __check_expect_stats(stats, expect_stats, errmsg);
+ objagg_stats_put(stats);
+ return err;
+}
+
+static int test_delta_action_item(struct world *world,
+ struct objagg *objagg,
+ const struct action_item *action_item,
+ bool inverse)
+{
+ unsigned int orig_delta_count = world->delta_count;
+ unsigned int orig_root_count = world->root_count;
+ unsigned int key_id = action_item->key_id;
+ enum action action = action_item->action;
+ struct objagg_obj *objagg_obj;
+ const char *errmsg;
+ int err;
+
+ if (inverse)
+ action = action == ACTION_GET ? ACTION_PUT : ACTION_GET;
+
+ switch (action) {
+ case ACTION_GET:
+ objagg_obj = world_obj_get(world, objagg, key_id);
+ if (IS_ERR(objagg_obj))
+ return PTR_ERR(objagg_obj);
+ break;
+ case ACTION_PUT:
+ world_obj_put(world, objagg, key_id);
+ break;
+ }
+
+ if (inverse)
+ return 0;
+ err = check_expect(world, action_item,
+ orig_delta_count, orig_root_count);
+ if (err)
+ goto errout;
+
+ err = check_expect_stats(objagg, &action_item->expect_stats, &errmsg);
+ if (err) {
+ pr_err("Key %u: Stats: %s\n", action_item->key_id, errmsg);
+ goto errout;
+ }
+
+ return 0;
+
+errout:
+ /* We only get here when the action was not inverted.
+ * So in case of an error, clean up by performing the inverse action.
+ */
+ test_delta_action_item(world, objagg, action_item, true);
+ return err;
+}
+
+static int test_delta(void)
+{
+ struct world world = {};
+ struct objagg *objagg;
+ int i;
+ int err;
+
+ objagg = objagg_create(&delta_ops, NULL, &world);
+ if (IS_ERR(objagg))
+ return PTR_ERR(objagg);
+
+ for (i = 0; i < ARRAY_SIZE(action_items); i++) {
+ err = test_delta_action_item(&world, objagg,
+ &action_items[i], false);
+ if (err)
+ goto err_do_action_item;
+ }
+
+ objagg_destroy(objagg);
+ return 0;
+
+err_do_action_item:
+ for (i--; i >= 0; i--)
+ test_delta_action_item(&world, objagg, &action_items[i], true);
+
+ objagg_destroy(objagg);
+ return err;
+}
+
+struct hints_case {
+ const unsigned int *key_ids;
+ size_t key_ids_count;
+ struct expect_stats expect_stats;
+ struct expect_stats expect_stats_hints;
+};
+
+static const unsigned int hints_case_key_ids[] = {
+ 1, 7, 3, 5, 3, 1, 30, 8, 8, 5, 6, 8,
+};
+
+static const struct hints_case hints_case = {
+ .key_ids = hints_case_key_ids,
+ .key_ids_count = ARRAY_SIZE(hints_case_key_ids),
+ .expect_stats =
+ EXPECT_STATS(7, ROOT(1, 2, 7), ROOT(7, 1, 4), ROOT(30, 1, 1),
+ DELTA(8, 3), DELTA(3, 2),
+ DELTA(5, 2), DELTA(6, 1)),
+ .expect_stats_hints =
+ EXPECT_STATS(7, ROOT(3, 2, 9), ROOT(1, 2, 2), ROOT(30, 1, 1),
+ DELTA(8, 3), DELTA(5, 2),
+ DELTA(6, 1), DELTA(7, 1)),
+};
+
+static void __pr_debug_stats(const struct objagg_stats *stats)
+{
+ int i;
+
+ for (i = 0; i < stats->stats_info_count; i++)
+ pr_debug("Stat index %d key %u: u %d, d %d, %s\n", i,
+ obj_to_key_id(stats->stats_info[i].objagg_obj),
+ stats->stats_info[i].stats.user_count,
+ stats->stats_info[i].stats.delta_user_count,
+ stats->stats_info[i].is_root ? "root" : "noroot");
+}
+
+static void pr_debug_stats(struct objagg *objagg)
+{
+ const struct objagg_stats *stats;
+
+ stats = objagg_stats_get(objagg);
+ if (IS_ERR(stats))
+ return;
+ __pr_debug_stats(stats);
+ objagg_stats_put(stats);
+}
+
+static void pr_debug_hints_stats(struct objagg_hints *objagg_hints)
+{
+ const struct objagg_stats *stats;
+
+ stats = objagg_hints_stats_get(objagg_hints);
+ if (IS_ERR(stats))
+ return;
+ __pr_debug_stats(stats);
+ objagg_stats_put(stats);
+}
+
+static int check_expect_hints_stats(struct objagg_hints *objagg_hints,
+ const struct expect_stats *expect_stats,
+ const char **errmsg)
+{
+ const struct objagg_stats *stats;
+ int err;
+
+ stats = objagg_hints_stats_get(objagg_hints);
+ if (IS_ERR(stats))
+ return PTR_ERR(stats);
+ err = __check_expect_stats(stats, expect_stats, errmsg);
+ objagg_stats_put(stats);
+ return err;
+}
+
+static int test_hints_case(const struct hints_case *hints_case)
+{
+ struct objagg_obj *objagg_obj;
+ struct objagg_hints *hints;
+ struct world world2 = {};
+ struct world world = {};
+ struct objagg *objagg2;
+ struct objagg *objagg;
+ const char *errmsg;
+ int i;
+ int err;
+
+ objagg = objagg_create(&delta_ops, NULL, &world);
+ if (IS_ERR(objagg))
+ return PTR_ERR(objagg);
+
+ for (i = 0; i < hints_case->key_ids_count; i++) {
+ objagg_obj = world_obj_get(&world, objagg,
+ hints_case->key_ids[i]);
+ if (IS_ERR(objagg_obj)) {
+ err = PTR_ERR(objagg_obj);
+ goto err_world_obj_get;
+ }
+ }
+
+ pr_debug_stats(objagg);
+ err = check_expect_stats(objagg, &hints_case->expect_stats, &errmsg);
+ if (err) {
+ pr_err("Stats: %s\n", errmsg);
+ goto err_check_expect_stats;
+ }
+
+ hints = objagg_hints_get(objagg, OBJAGG_OPT_ALGO_SIMPLE_GREEDY);
+ if (IS_ERR(hints)) {
+ err = PTR_ERR(hints);
+ goto err_hints_get;
+ }
+
+ pr_debug_hints_stats(hints);
+ err = check_expect_hints_stats(hints, &hints_case->expect_stats_hints,
+ &errmsg);
+ if (err) {
+ pr_err("Hints stats: %s\n", errmsg);
+ goto err_check_expect_hints_stats;
+ }
+
+ objagg2 = objagg_create(&delta_ops, hints, &world2);
+ if (IS_ERR(objagg2))
+ return PTR_ERR(objagg2);
+
+ for (i = 0; i < hints_case->key_ids_count; i++) {
+ objagg_obj = world_obj_get(&world2, objagg2,
+ hints_case->key_ids[i]);
+ if (IS_ERR(objagg_obj)) {
+ err = PTR_ERR(objagg_obj);
+ goto err_world2_obj_get;
+ }
+ }
+
+ pr_debug_stats(objagg2);
+ err = check_expect_stats(objagg2, &hints_case->expect_stats_hints,
+ &errmsg);
+ if (err) {
+ pr_err("Stats2: %s\n", errmsg);
+ goto err_check_expect_stats2;
+ }
+
+ err = 0;
+
+err_check_expect_stats2:
+err_world2_obj_get:
+ for (i--; i >= 0; i--)
+ world_obj_put(&world2, objagg2, hints_case->key_ids[i]);
+ objagg_hints_put(hints);
+ objagg_destroy(objagg2);
+ i = hints_case->key_ids_count;
+err_check_expect_hints_stats:
+err_hints_get:
+err_check_expect_stats:
+err_world_obj_get:
+ for (i--; i >= 0; i--)
+ world_obj_put(&world, objagg, hints_case->key_ids[i]);
+
+ objagg_destroy(objagg);
+ return err;
+}
+
+static int test_hints(void)
+{
+ return test_hints_case(&hints_case);
+}
+
+static int __init test_objagg_init(void)
+{
+ int err;
+
+ err = test_nodelta();
+ if (err)
+ return err;
+ err = test_delta();
+ if (err)
+ return err;
+ return test_hints();
+}
+
+static void __exit test_objagg_exit(void)
+{
+}
+
+module_init(test_objagg_init);
+module_exit(test_objagg_exit);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Test module for objagg");
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 53527ea822b5..659b6cc0d483 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
+#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -249,12 +250,11 @@ plain_format(void)
#endif /* BITS_PER_LONG == 64 */
static int __init
-plain_hash(void)
+plain_hash_to_buffer(const void *p, char *buf, size_t len)
{
- char buf[PLAIN_BUF_SIZE];
int nchars;
- nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
+ nchars = snprintf(buf, len, "%p", p);
if (nchars != PTR_WIDTH)
return -1;
@@ -265,6 +265,20 @@ plain_hash(void)
return 0;
}
+ return 0;
+}
+
+
+static int __init
+plain_hash(void)
+{
+ char buf[PLAIN_BUF_SIZE];
+ int ret;
+
+ ret = plain_hash_to_buffer(PTR, buf, PLAIN_BUF_SIZE);
+ if (ret)
+ return ret;
+
if (strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
return -1;
@@ -295,6 +309,23 @@ plain(void)
}
static void __init
+test_hashed(const char *fmt, const void *p)
+{
+ char buf[PLAIN_BUF_SIZE];
+ int ret;
+
+ /*
+ * No need to increase failed test counter since this is assumed
+ * to be called after plain().
+ */
+ ret = plain_hash_to_buffer(p, buf, PLAIN_BUF_SIZE);
+ if (ret)
+ return;
+
+ test(buf, fmt, p);
+}
+
+static void __init
symbol_ptr(void)
{
}
@@ -419,6 +450,29 @@ struct_va_format(void)
}
static void __init
+struct_rtc_time(void)
+{
+ /* 1543210543 == 2018-11-26 05:35:43 UTC */
+ const struct rtc_time tm = {
+ .tm_sec = 43,
+ .tm_min = 35,
+ .tm_hour = 5,
+ .tm_mday = 26,
+ .tm_mon = 10,
+ .tm_year = 118,
+ };
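+ /* Note: tm_mon is 0-based and tm_year counts from 1900, so the
+ * fields above correspond to 2018-11-26T05:35:43; the %ptRr
+ * ("raw") cases below show the unadjusted field values.
+ */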
+
+ test_hashed("%pt", &tm);
+
+ test("2018-11-26T05:35:43", "%ptR", &tm);
+ test("0118-10-26T05:35:43", "%ptRr", &tm);
+ test("05:35:43|2018-11-26", "%ptRt|%ptRd", &tm, &tm);
+ test("05:35:43|0118-10-26", "%ptRtr|%ptRdr", &tm, &tm);
+ test("05:35:43|2018-11-26", "%ptRttr|%ptRdtr", &tm, &tm);
+ test("05:35:43 tr|2018-11-26 tr", "%ptRt tr|%ptRd tr", &tm, &tm);
+}
+
+static void __init
struct_clk(void)
{
}
@@ -529,6 +583,7 @@ test_pointer(void)
uuid();
dentry();
struct_va_format();
+ struct_rtc_time();
struct_clk();
bitmap();
netdev_features();
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 82ac39ce5310..3bd2e91bfc29 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -20,11 +20,11 @@
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
-#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
+#include <linux/wait.h>
#define MAX_ENTRIES 1000000
#define TEST_INSERT_FAIL INT_MAX
@@ -112,8 +112,8 @@ static struct rhashtable_params test_rht_params_dup = {
.automatic_shrinking = false,
};
-static struct semaphore prestart_sem;
-static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
+static atomic_t startup_count;
+static DECLARE_WAIT_QUEUE_HEAD(startup_wait);
static int insert_retry(struct rhashtable *ht, struct test_obj *obj,
const struct rhashtable_params params)
@@ -177,16 +177,11 @@ static int __init test_rht_lookup(struct rhashtable *ht, struct test_obj *array,
static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)
{
- unsigned int err, total = 0, chain_len = 0;
+ unsigned int total = 0, chain_len = 0;
struct rhashtable_iter hti;
struct rhash_head *pos;
- err = rhashtable_walk_init(ht, &hti, GFP_KERNEL);
- if (err) {
- pr_warn("Test failed: allocation error");
- return;
- }
-
+ rhashtable_walk_enter(ht, &hti);
rhashtable_walk_start(&hti);
while ((pos = rhashtable_walk_next(&hti))) {
@@ -395,7 +390,7 @@ static int __init test_rhltable(unsigned int entries)
if (WARN(err, "cannot remove element at slot %d", i))
continue;
} else {
- if (WARN(err != -ENOENT, "removed non-existant element %d, error %d not %d",
+ if (WARN(err != -ENOENT, "removed non-existent element %d, error %d not %d",
i, err, -ENOENT))
continue;
}
@@ -440,7 +435,7 @@ static int __init test_rhltable(unsigned int entries)
if (WARN(err, "cannot remove element at slot %d", i))
continue;
} else {
- if (WARN(err != -ENOENT, "removed non-existant element, error %d not %d",
+ if (WARN(err != -ENOENT, "removed non-existent element, error %d not %d",
err, -ENOENT))
continue;
}
@@ -541,38 +536,45 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
int cnt, bool slow)
{
- struct rhltable rhlt;
+ struct rhltable *rhlt;
unsigned int i, ret;
const char *key;
int err = 0;
- err = rhltable_init(&rhlt, &test_rht_params_dup);
- if (WARN_ON(err))
+ rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
+ if (WARN_ON(!rhlt))
+ return -EINVAL;
+
+ err = rhltable_init(rhlt, &test_rht_params_dup);
+ if (WARN_ON(err)) {
+ kfree(rhlt);
return err;
+ }
for (i = 0; i < cnt; i++) {
rhl_test_objects[i].value.tid = i;
- key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead);
+ key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead);
key += test_rht_params_dup.key_offset;
if (slow) {
- err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key,
+ err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key,
&rhl_test_objects[i].list_node.rhead));
if (err == -EAGAIN)
err = 0;
} else
- err = rhltable_insert(&rhlt,
+ err = rhltable_insert(rhlt,
&rhl_test_objects[i].list_node,
test_rht_params_dup);
if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
goto skip_print;
}
- ret = print_ht(&rhlt);
+ ret = print_ht(rhlt);
WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
skip_print:
- rhltable_destroy(&rhlt);
+ rhltable_destroy(rhlt);
+ kfree(rhlt);
return 0;
}
@@ -634,9 +636,12 @@ static int threadfunc(void *data)
int i, step, err = 0, insert_retries = 0;
struct thread_data *tdata = data;
- up(&prestart_sem);
- if (down_interruptible(&startup_sem))
- pr_err(" thread[%d]: down_interruptible failed\n", tdata->id);
+ if (atomic_dec_and_test(&startup_count))
+ wake_up(&startup_wait);
+ if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == -1)) {
+ pr_err(" thread[%d]: interrupted\n", tdata->id);
+ goto out;
+ }
for (i = 0; i < tdata->entries; i++) {
tdata->objs[i].value.id = i;
@@ -755,7 +760,7 @@ static int __init test_rht_init(void)
pr_info("Testing concurrent rhashtable access from %d threads\n",
tcount);
- sema_init(&prestart_sem, 1 - tcount);
+ atomic_set(&startup_count, tcount);
tdata = vzalloc(array_size(tcount, sizeof(struct thread_data)));
if (!tdata)
return -ENOMEM;
@@ -781,15 +786,18 @@ static int __init test_rht_init(void)
tdata[i].objs = objs + i * entries;
tdata[i].task = kthread_run(threadfunc, &tdata[i],
"rhashtable_thrad[%d]", i);
- if (IS_ERR(tdata[i].task))
+ if (IS_ERR(tdata[i].task)) {
pr_err(" kthread_run failed for thread %d\n", i);
- else
+ atomic_dec(&startup_count);
+ } else {
started_threads++;
+ }
}
- if (down_interruptible(&prestart_sem))
- pr_err(" down interruptible failed\n");
- for (i = 0; i < tcount; i++)
- up(&startup_sem);
+ if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == 0))
+ pr_err(" wait_event interruptible failed\n");
+ /* count is 0 now, set it to -1 and wake up all threads together */
+ atomic_dec(&startup_count);
+ wake_up_all(&startup_wait);
for (i = 0; i < tcount; i++) {
if (IS_ERR(tdata[i].task))
continue;
diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c
new file mode 100644
index 000000000000..13115b6f2b88
--- /dev/null
+++ b/lib/test_stackinit.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for compiler-based stack variable zeroing via future
+ * compiler flags or CONFIG_GCC_PLUGIN_STRUCTLEAK*.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+/* Exfiltration buffer. */
+#define MAX_VAR_SIZE 128
+static char check_buf[MAX_VAR_SIZE];
+
+/* Character array to trigger stack protector in all functions. */
+#define VAR_BUFFER 32
+
+/* Volatile mask to convince compiler to copy memory with 0xff. */
+static volatile u8 forced_mask = 0xff;
+
+/* Location and size tracking to validate fill and test are colocated. */
+static void *fill_start, *target_start;
+static size_t fill_size, target_size;
+
+static bool range_contains(char *haystack_start, size_t haystack_size,
+ char *needle_start, size_t needle_size)
+{
+ if (needle_start >= haystack_start &&
+ needle_start + needle_size <= haystack_start + haystack_size)
+ return true;
+ return false;
+}
+
+#define DO_NOTHING_TYPE_SCALAR(var_type) var_type
+#define DO_NOTHING_TYPE_STRING(var_type) void
+#define DO_NOTHING_TYPE_STRUCT(var_type) void
+
+#define DO_NOTHING_RETURN_SCALAR(ptr) *(ptr)
+#define DO_NOTHING_RETURN_STRING(ptr) /**/
+#define DO_NOTHING_RETURN_STRUCT(ptr) /**/
+
+#define DO_NOTHING_CALL_SCALAR(var, name) \
+ (var) = do_nothing_ ## name(&(var))
+#define DO_NOTHING_CALL_STRING(var, name) \
+ do_nothing_ ## name(var)
+#define DO_NOTHING_CALL_STRUCT(var, name) \
+ do_nothing_ ## name(&(var))
+
+#define FETCH_ARG_SCALAR(var) &var
+#define FETCH_ARG_STRING(var) var
+#define FETCH_ARG_STRUCT(var) &var
+
+#define FILL_SIZE_STRING 16
+
+#define INIT_CLONE_SCALAR /**/
+#define INIT_CLONE_STRING [FILL_SIZE_STRING]
+#define INIT_CLONE_STRUCT /**/
+
+#define INIT_SCALAR_none /**/
+#define INIT_SCALAR_zero = 0
+
+#define INIT_STRING_none [FILL_SIZE_STRING] /**/
+#define INIT_STRING_zero [FILL_SIZE_STRING] = { }
+
+#define INIT_STRUCT_none /**/
+#define INIT_STRUCT_zero = { }
+#define INIT_STRUCT_static_partial = { .two = 0, }
+#define INIT_STRUCT_static_all = { .one = arg->one, \
+ .two = arg->two, \
+ .three = arg->three, \
+ .four = arg->four, \
+ }
+#define INIT_STRUCT_dynamic_partial = { .two = arg->two, }
+#define INIT_STRUCT_dynamic_all = { .one = arg->one, \
+ .two = arg->two, \
+ .three = arg->three, \
+ .four = arg->four, \
+ }
+#define INIT_STRUCT_runtime_partial ; \
+ var.two = 0
+#define INIT_STRUCT_runtime_all ; \
+ var.one = 0; \
+ var.two = 0; \
+ var.three = 0; \
+ memset(&var.four, 0, \
+ sizeof(var.four))
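+
+/* Illustration of how these token-pasted initializers are used: inside
+ * leaf_##name() below, "var_type var INIT_STRUCT_runtime_partial;" expands
+ * to "var_type var; var.two = 0;", i.e. an uninitialized declaration
+ * followed by a single runtime field assignment.
+ */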
+
+/*
+ * @name: unique string name for the test
+ * @var_type: type to be tested for zeroing initialization
+ * @which: is this a SCALAR, STRING, or STRUCT type?
+ * @init_level: what kind of initialization is performed
+ */
+#define DEFINE_TEST_DRIVER(name, var_type, which) \
+/* Returns 0 on success, 1 on failure. */ \
+static noinline __init int test_ ## name (void) \
+{ \
+ var_type zero INIT_CLONE_ ## which; \
+ int ignored; \
+ u8 sum = 0, i; \
+ \
+ /* Notice when a new test is larger than expected. */ \
+ BUILD_BUG_ON(sizeof(zero) > MAX_VAR_SIZE); \
+ \
+ /* Fill clone type with zero for per-field init. */ \
+ memset(&zero, 0x00, sizeof(zero)); \
+ /* Fill stack with 0xFF. */ \
+ ignored = leaf_ ##name((unsigned long)&ignored, 1, \
+ FETCH_ARG_ ## which(zero)); \
+ /* Clear entire check buffer for later bit tests. */ \
+ memset(check_buf, 0x00, sizeof(check_buf)); \
+ /* Extract stack-defined variable contents. */ \
+ ignored = leaf_ ##name((unsigned long)&ignored, 0, \
+ FETCH_ARG_ ## which(zero)); \
+ \
+ /* Validate that compiler lined up fill and target. */ \
+ if (!range_contains(fill_start, fill_size, \
+ target_start, target_size)) { \
+ pr_err(#name ": stack fill missed target!?\n"); \
+ pr_err(#name ": fill %zu wide\n", fill_size); \
+ pr_err(#name ": target offset by %d\n", \
+ (int)((ssize_t)(uintptr_t)fill_start - \
+ (ssize_t)(uintptr_t)target_start)); \
+ return 1; \
+ } \
+ \
+ /* Look for any set bits in the check region. */ \
+ for (i = 0; i < sizeof(check_buf); i++) \
+ sum += (check_buf[i] != 0); \
+ \
+ if (sum == 0) \
+ pr_info(#name " ok\n"); \
+ else \
+ pr_warn(#name " FAIL (uninit bytes: %d)\n", \
+ sum); \
+ \
+ return (sum != 0); \
+}
+#define DEFINE_TEST(name, var_type, which, init_level) \
+/* no-op to force compiler into ignoring "uninitialized" vars */\
+static noinline __init DO_NOTHING_TYPE_ ## which(var_type) \
+do_nothing_ ## name(var_type *ptr) \
+{ \
+ /* Will always be true, but compiler doesn't know. */ \
+ if ((unsigned long)ptr > 0x2) \
+ return DO_NOTHING_RETURN_ ## which(ptr); \
+ else \
+ return DO_NOTHING_RETURN_ ## which(ptr + 1); \
+} \
+static noinline __init int leaf_ ## name(unsigned long sp, \
+ bool fill, \
+ var_type *arg) \
+{ \
+ char buf[VAR_BUFFER]; \
+ var_type var INIT_ ## which ## _ ## init_level; \
+ \
+ target_start = &var; \
+ target_size = sizeof(var); \
+ /* \
+ * Keep this buffer around to make sure we've got a \
+ * stack frame of SOME kind... \
+ */ \
+ memset(buf, (char)(sp & 0xff), sizeof(buf)); \
+ /* Fill variable with 0xFF. */ \
+ if (fill) { \
+ fill_start = &var; \
+ fill_size = sizeof(var); \
+ memset(fill_start, \
+ (char)((sp & 0xff) | forced_mask), \
+ fill_size); \
+ } \
+ \
+ /* Silence "never initialized" warnings. */ \
+ DO_NOTHING_CALL_ ## which(var, name); \
+ \
+ /* Exfiltrate "var". */ \
+ memcpy(check_buf, target_start, target_size); \
+ \
+ return (int)buf[0] | (int)buf[sizeof(buf) - 1]; \
+} \
+DEFINE_TEST_DRIVER(name, var_type, which)
+
+/* Structure with no padding. */
+struct test_packed {
+ unsigned long one;
+ unsigned long two;
+ unsigned long three;
+ unsigned long four;
+};
+
+/* Simple structure with padding likely to be covered by compiler. */
+struct test_small_hole {
+ size_t one;
+ char two;
+ /* 3 byte padding hole here. */
+ int three;
+ unsigned long four;
+};
+
+/* Try to trigger unhandled padding in a structure. */
+struct test_aligned {
+ u32 internal1;
+ u64 internal2;
+} __aligned(64);
+
+struct test_big_hole {
+ u8 one;
+ u8 two;
+ u8 three;
+ /* 61 byte padding hole here. */
+ struct test_aligned four;
+} __aligned(64);
+
+struct test_trailing_hole {
+ char *one;
+ char *two;
+ char *three;
+ char four;
+ /* "sizeof(unsigned long) - 1" byte padding hole here. */
+};
+
+/* Test if STRUCTLEAK is clearing structs with __user fields. */
+struct test_user {
+ u8 one;
+ unsigned long two;
+ char __user *three;
+ unsigned long four;
+};
+
+#define DEFINE_SCALAR_TEST(name, init) \
+ DEFINE_TEST(name ## _ ## init, name, SCALAR, init)
+
+#define DEFINE_SCALAR_TESTS(init) \
+ DEFINE_SCALAR_TEST(u8, init); \
+ DEFINE_SCALAR_TEST(u16, init); \
+ DEFINE_SCALAR_TEST(u32, init); \
+ DEFINE_SCALAR_TEST(u64, init); \
+ DEFINE_TEST(char_array_ ## init, unsigned char, STRING, init)
+
+#define DEFINE_STRUCT_TEST(name, init) \
+ DEFINE_TEST(name ## _ ## init, \
+ struct test_ ## name, STRUCT, init)
+
+#define DEFINE_STRUCT_TESTS(init) \
+ DEFINE_STRUCT_TEST(small_hole, init); \
+ DEFINE_STRUCT_TEST(big_hole, init); \
+ DEFINE_STRUCT_TEST(trailing_hole, init); \
+ DEFINE_STRUCT_TEST(packed, init)
+
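+/* For example, DEFINE_SCALAR_TESTS(zero) below generates test_u8_zero(),
+ * test_u16_zero(), test_u32_zero(), test_u64_zero() and
+ * test_char_array_zero() (plus their leaf_* and do_nothing_* helpers),
+ * which test_stackinit_init() then invokes through the test_scalars()
+ * macro.
+ */
+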
+/* These should be fully initialized all the time! */
+DEFINE_SCALAR_TESTS(zero);
+DEFINE_STRUCT_TESTS(zero);
+/* Static initialization: padding may be left uninitialized. */
+DEFINE_STRUCT_TESTS(static_partial);
+DEFINE_STRUCT_TESTS(static_all);
+/* Dynamic initialization: padding may be left uninitialized. */
+DEFINE_STRUCT_TESTS(dynamic_partial);
+DEFINE_STRUCT_TESTS(dynamic_all);
+/* Runtime initialization: padding may be left uninitialized. */
+DEFINE_STRUCT_TESTS(runtime_partial);
+DEFINE_STRUCT_TESTS(runtime_all);
+/* No initialization without compiler instrumentation. */
+DEFINE_SCALAR_TESTS(none);
+DEFINE_STRUCT_TESTS(none);
+DEFINE_TEST(user, struct test_user, STRUCT, none);
+
+/*
+ * Check two uses through a variable declaration outside either path,
+ * which was noticed as a special case in porting earlier stack init
+ * compiler logic.
+ */
+static int noinline __leaf_switch_none(int path, bool fill)
+{
+ switch (path) {
+ uint64_t var;
+
+ case 1:
+ target_start = &var;
+ target_size = sizeof(var);
+ if (fill) {
+ fill_start = &var;
+ fill_size = sizeof(var);
+
+ memset(fill_start, forced_mask | 0x55, fill_size);
+ }
+ memcpy(check_buf, target_start, target_size);
+ break;
+ case 2:
+ target_start = &var;
+ target_size = sizeof(var);
+ if (fill) {
+ fill_start = &var;
+ fill_size = sizeof(var);
+
+ memset(fill_start, forced_mask | 0xaa, fill_size);
+ }
+ memcpy(check_buf, target_start, target_size);
+ break;
+ default:
+ var = 5;
+ return var & forced_mask;
+ }
+ return 0;
+}
+
+static noinline __init int leaf_switch_1_none(unsigned long sp, bool fill,
+ uint64_t *arg)
+{
+ return __leaf_switch_none(1, fill);
+}
+
+static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill,
+ uint64_t *arg)
+{
+ return __leaf_switch_none(2, fill);
+}
+
+DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR);
+DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR);
+
+static int __init test_stackinit_init(void)
+{
+ unsigned int failures = 0;
+
+#define test_scalars(init) do { \
+ failures += test_u8_ ## init (); \
+ failures += test_u16_ ## init (); \
+ failures += test_u32_ ## init (); \
+ failures += test_u64_ ## init (); \
+ failures += test_char_array_ ## init (); \
+ } while (0)
+
+#define test_structs(init) do { \
+ failures += test_small_hole_ ## init (); \
+ failures += test_big_hole_ ## init (); \
+ failures += test_trailing_hole_ ## init (); \
+ failures += test_packed_ ## init (); \
+ } while (0)
+
+ /* These are explicitly initialized and should always pass. */
+ test_scalars(zero);
+ test_structs(zero);
+ /* Padding here appears to be accidentally always initialized? */
+ test_structs(dynamic_partial);
+ /* Padding initialization depends on compiler behaviors. */
+ test_structs(static_partial);
+ test_structs(static_all);
+ test_structs(dynamic_all);
+ test_structs(runtime_partial);
+ test_structs(runtime_all);
+
+ /* STRUCTLEAK_BYREF_ALL should cover everything from here down. */
+ test_scalars(none);
+ failures += test_switch_1_none();
+ failures += test_switch_2_none();
+
+ /* STRUCTLEAK_BYREF should cover from here down. */
+ test_structs(none);
+
+ /* STRUCTLEAK will only cover this. */
+ failures += test_user();
+
+ if (failures == 0)
+ pr_info("all tests passed!\n");
+ else
+ pr_err("failures: %u\n", failures);
+
+ return failures ? -EINVAL : 0;
+}
+module_init(test_stackinit_init);
+
+static void __exit test_stackinit_exit(void)
+{ }
+module_exit(test_stackinit_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 280f4979d00e..9ea10adf7a66 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -42,14 +42,6 @@ static void test_ubsan_divrem_overflow(void)
val /= val2;
}
-static void test_ubsan_vla_bound_not_positive(void)
-{
- volatile int size = -1;
- char buf[size];
-
- (void)buf;
-}
-
static void test_ubsan_shift_out_of_bounds(void)
{
volatile int val = -1;
@@ -61,7 +53,7 @@ static void test_ubsan_shift_out_of_bounds(void)
static void test_ubsan_out_of_bounds(void)
{
volatile int i = 4, j = 5;
- volatile int arr[i];
+ volatile int arr[4];
arr[j] = i;
}
@@ -113,7 +105,6 @@ static const test_ubsan_fp test_ubsan_array[] = {
test_ubsan_mul_overflow,
test_ubsan_negate_overflow,
test_ubsan_divrem_overflow,
- test_ubsan_vla_bound_not_positive,
test_ubsan_shift_out_of_bounds,
test_ubsan_out_of_bounds,
test_ubsan_load_invalid_value,
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
new file mode 100644
index 000000000000..f832b095afba
--- /dev/null
+++ b/lib/test_vmalloc.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Test module for stress and analyze performance of vmalloc allocator.
+ * (C) 2018 Uladzislau Rezki (Sony) <urezki@gmail.com>
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/random.h>
+#include <linux/kthread.h>
+#include <linux/moduleparam.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/rwsem.h>
+#include <linux/mm.h>
+
+#define __param(type, name, init, msg) \
+ static type name = init; \
+ module_param(name, type, 0444); \
+ MODULE_PARM_DESC(name, msg) \
+
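+/* For instance, the first use below, __param(bool, single_cpu_test, false,
+ * "..."), expands to a static bool single_cpu_test = false together with
+ * module_param(single_cpu_test, bool, 0444) and its MODULE_PARM_DESC().
+ */
+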
+__param(bool, single_cpu_test, false,
+ "Use single first online CPU to run tests");
+
+__param(bool, sequential_test_order, false,
+ "Use sequential stress tests order");
+
+__param(int, test_repeat_count, 1,
+ "Set test repeat counter");
+
+__param(int, test_loop_count, 1000000,
+ "Set test loop counter");
+
+__param(int, run_test_mask, INT_MAX,
+ "Set tests specified in the mask.\n\n"
+ "\t\tid: 1, name: fix_size_alloc_test\n"
+ "\t\tid: 2, name: full_fit_alloc_test\n"
+ "\t\tid: 4, name: long_busy_list_alloc_test\n"
+ "\t\tid: 8, name: random_size_alloc_test\n"
+ "\t\tid: 16, name: fix_align_alloc_test\n"
+ "\t\tid: 32, name: random_size_align_alloc_test\n"
+ "\t\tid: 64, name: align_shift_alloc_test\n"
+ "\t\tid: 128, name: pcpu_alloc_test\n"
+ /* Add a new test case description here. */
+);
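+
+/*
+ * Example invocation (assuming the module is built as test_vmalloc.ko):
+ *
+ *   modprobe test_vmalloc run_test_mask=3 test_repeat_count=10
+ *
+ * would run only fix_size_alloc_test and full_fit_alloc_test, repeating
+ * each of them 10 times on every selected CPU, per the ids listed above.
+ */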
+
+/*
+ * Depends on the single_cpu_test parameter. If it is true, the
+ * tests run only on the first online CPU; otherwise they run on
+ * all online CPUs.
+ */
+static cpumask_t cpus_run_test_mask = CPU_MASK_NONE;
+
+/*
+ * Read-write semaphore used to synchronize the setup phase,
+ * done in the main thread, with the worker threads.
+ */
+static DECLARE_RWSEM(prepare_for_test_rwsem);
+
+/*
+ * Completion tracking for worker threads.
+ */
+static DECLARE_COMPLETION(test_all_done_comp);
+static atomic_t test_n_undone = ATOMIC_INIT(0);
+
+static inline void
+test_report_one_done(void)
+{
+ if (atomic_dec_and_test(&test_n_undone))
+ complete(&test_all_done_comp);
+}
+
+static int random_size_align_alloc_test(void)
+{
+ unsigned long size, align, rnd;
+ void *ptr;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ get_random_bytes(&rnd, sizeof(rnd));
+
+ /*
+ * Maximum 1024 pages, if PAGE_SIZE is 4096.
+ */
+ align = 1 << (rnd % 23);
+
+ /*
+ * Maximum 10 pages.
+ */
+ size = ((rnd % 10) + 1) * PAGE_SIZE;
+
+ ptr = __vmalloc_node_range(size, align,
+ VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL | __GFP_ZERO,
+ PAGE_KERNEL,
+ 0, 0, __builtin_return_address(0));
+
+ if (!ptr)
+ return -1;
+
+ vfree(ptr);
+ }
+
+ return 0;
+}
+
+/*
+ * This test case is expected to fail.
+ */
+static int align_shift_alloc_test(void)
+{
+ unsigned long align;
+ void *ptr;
+ int i;
+
+ for (i = 0; i < BITS_PER_LONG; i++) {
+ align = ((unsigned long) 1) << i;
+
+ ptr = __vmalloc_node_range(PAGE_SIZE, align,
+ VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL | __GFP_ZERO,
+ PAGE_KERNEL,
+ 0, 0, __builtin_return_address(0));
+
+ if (!ptr)
+ return -1;
+
+ vfree(ptr);
+ }
+
+ return 0;
+}
+
+static int fix_align_alloc_test(void)
+{
+ void *ptr;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ ptr = __vmalloc_node_range(5 * PAGE_SIZE,
+ THREAD_ALIGN << 1,
+ VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL | __GFP_ZERO,
+ PAGE_KERNEL,
+ 0, 0, __builtin_return_address(0));
+
+ if (!ptr)
+ return -1;
+
+ vfree(ptr);
+ }
+
+ return 0;
+}
+
+static int random_size_alloc_test(void)
+{
+ unsigned int n;
+ void *p;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ get_random_bytes(&n, sizeof(n));
+ n = (n % 100) + 1;
+
+ p = vmalloc(n * PAGE_SIZE);
+
+ if (!p)
+ return -1;
+
+ *((__u8 *)p) = 1;
+ vfree(p);
+ }
+
+ return 0;
+}
+
+static int long_busy_list_alloc_test(void)
+{
+ void *ptr_1, *ptr_2;
+ void **ptr;
+ int rv = -1;
+ int i;
+
+ ptr = vmalloc(sizeof(void *) * 15000);
+ if (!ptr)
+ return rv;
+
+ for (i = 0; i < 15000; i++)
+ ptr[i] = vmalloc(1 * PAGE_SIZE);
+
+ for (i = 0; i < test_loop_count; i++) {
+ ptr_1 = vmalloc(100 * PAGE_SIZE);
+ if (!ptr_1)
+ goto leave;
+
+ ptr_2 = vmalloc(1 * PAGE_SIZE);
+ if (!ptr_2) {
+ vfree(ptr_1);
+ goto leave;
+ }
+
+ *((__u8 *)ptr_1) = 0;
+ *((__u8 *)ptr_2) = 1;
+
+ vfree(ptr_1);
+ vfree(ptr_2);
+ }
+
+ /* Success */
+ rv = 0;
+
+leave:
+ for (i = 0; i < 15000; i++)
+ vfree(ptr[i]);
+
+ vfree(ptr);
+ return rv;
+}
+
+static int full_fit_alloc_test(void)
+{
+ void **ptr, **junk_ptr, *tmp;
+ int junk_length;
+ int rv = -1;
+ int i;
+
+ junk_length = fls(num_online_cpus());
+ junk_length *= (32 * 1024 * 1024 / PAGE_SIZE);
+
+ ptr = vmalloc(sizeof(void *) * junk_length);
+ if (!ptr)
+ return rv;
+
+ junk_ptr = vmalloc(sizeof(void *) * junk_length);
+ if (!junk_ptr) {
+ vfree(ptr);
+ return rv;
+ }
+
+ for (i = 0; i < junk_length; i++) {
+ ptr[i] = vmalloc(1 * PAGE_SIZE);
+ junk_ptr[i] = vmalloc(1 * PAGE_SIZE);
+ }
+
+ for (i = 0; i < junk_length; i++)
+ vfree(junk_ptr[i]);
+
+ for (i = 0; i < test_loop_count; i++) {
+ tmp = vmalloc(1 * PAGE_SIZE);
+
+ if (!tmp)
+ goto error;
+
+ *((__u8 *)tmp) = 1;
+ vfree(tmp);
+ }
+
+ /* Success */
+ rv = 0;
+
+error:
+ for (i = 0; i < junk_length; i++)
+ vfree(ptr[i]);
+
+ vfree(ptr);
+ vfree(junk_ptr);
+
+ return rv;
+}
+
+static int fix_size_alloc_test(void)
+{
+ void *ptr;
+ int i;
+
+ for (i = 0; i < test_loop_count; i++) {
+ ptr = vmalloc(3 * PAGE_SIZE);
+
+ if (!ptr)
+ return -1;
+
+ *((__u8 *)ptr) = 0;
+
+ vfree(ptr);
+ }
+
+ return 0;
+}
+
+static int
+pcpu_alloc_test(void)
+{
+ int rv = 0;
+#ifndef CONFIG_NEED_PER_CPU_KM
+ void __percpu **pcpu;
+ size_t size, align;
+ int i;
+
+ pcpu = vmalloc(sizeof(void __percpu *) * 35000);
+ if (!pcpu)
+ return -1;
+
+ for (i = 0; i < 35000; i++) {
+ unsigned int r;
+
+ get_random_bytes(&r, sizeof(r));
+ size = (r % (PAGE_SIZE / 4)) + 1;
+
+ /*
+ * Random power-of-two alignment, up to 2048 bytes.
+ */
+ get_random_bytes(&r, sizeof(r));
+ align = 1 << ((r % 11) + 1);
+
+ pcpu[i] = __alloc_percpu(size, align);
+ if (!pcpu[i])
+ rv = -1;
+ }
+
+ for (i = 0; i < 35000; i++)
+ free_percpu(pcpu[i]);
+
+ vfree(pcpu);
+#endif
+ return rv;
+}
+
+struct test_case_desc {
+ const char *test_name;
+ int (*test_func)(void);
+};
+
+static struct test_case_desc test_case_array[] = {
+ { "fix_size_alloc_test", fix_size_alloc_test },
+ { "full_fit_alloc_test", full_fit_alloc_test },
+ { "long_busy_list_alloc_test", long_busy_list_alloc_test },
+ { "random_size_alloc_test", random_size_alloc_test },
+ { "fix_align_alloc_test", fix_align_alloc_test },
+ { "random_size_align_alloc_test", random_size_align_alloc_test },
+ { "align_shift_alloc_test", align_shift_alloc_test },
+ { "pcpu_alloc_test", pcpu_alloc_test },
+ /* Add a new test case here. */
+};
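+
+/* The run_test_mask bit for each test is 1 << (its index in this array),
+ * which is what the "id:" values in the parameter description above refer
+ * to; keep the two lists in sync when adding a test case.
+ */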
+
+struct test_case_data {
+ int test_failed;
+ int test_passed;
+ u64 time;
+};
+
+/* Split it to get rid of: WARNING: line over 80 characters */
+static struct test_case_data
+ per_cpu_test_data[NR_CPUS][ARRAY_SIZE(test_case_array)];
+
+static struct test_driver {
+ struct task_struct *task;
+ unsigned long start;
+ unsigned long stop;
+ int cpu;
+} per_cpu_test_driver[NR_CPUS];
+
+static void shuffle_array(int *arr, int n)
+{
+ unsigned int rnd;
+ int i, j, x;
+
+ for (i = n - 1; i > 0; i--) {
+ get_random_bytes(&rnd, sizeof(rnd));
+
+ /* Cut the range. */
+ j = rnd % i;
+
+ /* Swap indexes. */
+ x = arr[i];
+ arr[i] = arr[j];
+ arr[j] = x;
+ }
+}
+
+static int test_func(void *private)
+{
+ struct test_driver *t = private;
+ int random_array[ARRAY_SIZE(test_case_array)];
+ int index, i, j, ret;
+ ktime_t kt;
+ u64 delta;
+
+ ret = set_cpus_allowed_ptr(current, cpumask_of(t->cpu));
+ if (ret < 0)
+ pr_err("Failed to set affinity to %d CPU\n", t->cpu);
+
+ for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
+ random_array[i] = i;
+
+ if (!sequential_test_order)
+ shuffle_array(random_array, ARRAY_SIZE(test_case_array));
+
+ /*
+ * Block until initialization is done.
+ */
+ down_read(&prepare_for_test_rwsem);
+
+ t->start = get_cycles();
+ for (i = 0; i < ARRAY_SIZE(test_case_array); i++) {
+ index = random_array[i];
+
+ /*
+ * Skip this test if it is not selected in run_test_mask.
+ */
+ if (!((run_test_mask & (1 << index)) >> index))
+ continue;
+
+ kt = ktime_get();
+ for (j = 0; j < test_repeat_count; j++) {
+ ret = test_case_array[index].test_func();
+ if (!ret)
+ per_cpu_test_data[t->cpu][index].test_passed++;
+ else
+ per_cpu_test_data[t->cpu][index].test_failed++;
+ }
+
+ /*
+ * Average the time the test took over all repeats.
+ */
+ delta = (u64) ktime_us_delta(ktime_get(), kt);
+ do_div(delta, (u32) test_repeat_count);
+
+ per_cpu_test_data[t->cpu][index].time = delta;
+ }
+ t->stop = get_cycles();
+
+ up_read(&prepare_for_test_rwsem);
+ test_report_one_done();
+
+ /*
+ * Wait for the kthread_stop() call.
+ */
+ while (!kthread_should_stop())
+ msleep(10);
+
+ return 0;
+}
+
+static void
+init_test_configuration(void)
+{
+ /*
+ * Reset all data of all CPUs.
+ */
+ memset(per_cpu_test_data, 0, sizeof(per_cpu_test_data));
+
+ if (single_cpu_test)
+ cpumask_set_cpu(cpumask_first(cpu_online_mask),
+ &cpus_run_test_mask);
+ else
+ cpumask_and(&cpus_run_test_mask, cpu_online_mask,
+ cpu_online_mask);
+
+ if (test_repeat_count <= 0)
+ test_repeat_count = 1;
+
+ if (test_loop_count <= 0)
+ test_loop_count = 1;
+}
+
+static void do_concurrent_test(void)
+{
+ int cpu, ret;
+
+ /*
+ * Set up the basic configuration and sanity-check the parameters.
+ */
+ init_test_configuration();
+
+ /*
+ * Put all workers on hold until setup is complete.
+ */
+ down_write(&prepare_for_test_rwsem);
+
+ for_each_cpu(cpu, &cpus_run_test_mask) {
+ struct test_driver *t = &per_cpu_test_driver[cpu];
+
+ t->cpu = cpu;
+ t->task = kthread_run(test_func, t, "vmalloc_test/%d", cpu);
+
+ if (!IS_ERR(t->task))
+ /* Success. */
+ atomic_inc(&test_n_undone);
+ else
+ pr_err("Failed to start kthread for %d CPU\n", cpu);
+ }
+
+ /*
+ * Now let the workers do their job.
+ */
+ up_write(&prepare_for_test_rwsem);
+
+ /*
+ * Sleep quietly, polling at a 1-second interval, until all
+ * workers are done. Since the tests can take a long time, a
+ * plain wait could trigger a hung-task stack trace. That is
+ * why we use wait_for_completion_timeout() with an HZ timeout.
+ */
+ do {
+ ret = wait_for_completion_timeout(&test_all_done_comp, HZ);
+ } while (!ret);
+
+ for_each_cpu(cpu, &cpus_run_test_mask) {
+ struct test_driver *t = &per_cpu_test_driver[cpu];
+ int i;
+
+ if (!IS_ERR(t->task))
+ kthread_stop(t->task);
+
+ for (i = 0; i < ARRAY_SIZE(test_case_array); i++) {
+ if (!((run_test_mask & (1 << i)) >> i))
+ continue;
+
+ pr_info(
+ "Summary: %s passed: %d failed: %d repeat: %d loops: %d avg: %llu usec\n",
+ test_case_array[i].test_name,
+ per_cpu_test_data[cpu][i].test_passed,
+ per_cpu_test_data[cpu][i].test_failed,
+ test_repeat_count, test_loop_count,
+ per_cpu_test_data[cpu][i].time);
+ }
+
+ pr_info("All test took CPU%d=%lu cycles\n",
+ cpu, t->stop - t->start);
+ }
+}
+
+static int vmalloc_test_init(void)
+{
+ do_concurrent_test();
+ return -EAGAIN; /* Failing the init unloads the module right away */
+}
+
+static void vmalloc_test_exit(void)
+{
+}
+
+module_init(vmalloc_test_init)
+module_exit(vmalloc_test_exit)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Uladzislau Rezki");
+MODULE_DESCRIPTION("vmalloc test module");
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 4676c0a1eeca..5d4bad8bd96a 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -40,9 +40,9 @@ static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
- u32 id = 0;
+ u32 id;
- XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index),
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
gfp) != 0);
XA_BUG_ON(xa, id != index);
}
@@ -107,8 +107,11 @@ static noinline void check_xas_retry(struct xarray *xa)
XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
XA_BUG_ON(xa, xas.xa_node != NULL);
+ rcu_read_unlock();
XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
+
+ rcu_read_lock();
XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
xas.xa_node = XAS_RESTART;
XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
@@ -199,7 +202,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
xa_set_mark(xa, index + 1, XA_MARK_0);
XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
- xa_set_mark(xa, index + 2, XA_MARK_1);
+ xa_set_mark(xa, index + 2, XA_MARK_2);
XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
xa_store_order(xa, index, order, xa_mk_index(index),
GFP_KERNEL);
@@ -209,8 +212,8 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
void *entry;
XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
- XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1));
- XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
+ XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
+ XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));
/* We should see two elements in the array */
rcu_read_lock();
@@ -343,7 +346,7 @@ static noinline void check_cmpxchg(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
- XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EEXIST);
+ XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
@@ -357,45 +360,66 @@ static noinline void check_cmpxchg(struct xarray *xa)
static noinline void check_reserve(struct xarray *xa)
{
void *entry;
- unsigned long index = 0;
+ unsigned long index;
+ int count;
/* An array with a reserved entry is not empty */
XA_BUG_ON(xa, !xa_empty(xa));
- xa_reserve(xa, 12345678, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
XA_BUG_ON(xa, xa_empty(xa));
XA_BUG_ON(xa, xa_load(xa, 12345678));
xa_release(xa, 12345678);
XA_BUG_ON(xa, !xa_empty(xa));
/* Releasing a used entry does nothing */
- xa_reserve(xa, 12345678, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
xa_release(xa, 12345678);
xa_erase_index(xa, 12345678);
XA_BUG_ON(xa, !xa_empty(xa));
- /* cmpxchg sees a reserved entry as NULL */
- xa_reserve(xa, 12345678, GFP_KERNEL);
- XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678),
- GFP_NOWAIT) != NULL);
+ /* cmpxchg sees a reserved entry as ZERO */
+ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
+ xa_mk_value(12345678), GFP_NOWAIT) != NULL);
xa_release(xa, 12345678);
xa_erase_index(xa, 12345678);
XA_BUG_ON(xa, !xa_empty(xa));
- /* And so does xa_insert */
- xa_reserve(xa, 12345678, GFP_KERNEL);
- XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0);
- xa_erase_index(xa, 12345678);
+ /* xa_insert treats it as busy */
+ XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
+ -EBUSY);
+ XA_BUG_ON(xa, xa_empty(xa));
+ XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
XA_BUG_ON(xa, !xa_empty(xa));
/* Can iterate through a reserved entry */
xa_store_index(xa, 5, GFP_KERNEL);
- xa_reserve(xa, 6, GFP_KERNEL);
+ XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
xa_store_index(xa, 7, GFP_KERNEL);
- xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+ count = 0;
+ xa_for_each(xa, index, entry) {
XA_BUG_ON(xa, index != 5 && index != 7);
+ count++;
}
+ XA_BUG_ON(xa, count != 2);
+
+ /* If we free a reserved entry, we should be able to allocate it */
+ if (xa->xa_flags & XA_FLAGS_ALLOC) {
+ u32 id;
+
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
+ XA_LIMIT(5, 10), GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 8);
+
+ xa_release(xa, 6);
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
+ XA_LIMIT(5, 10), GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 6);
+ }
+
xa_destroy(xa);
}
@@ -584,64 +608,194 @@ static noinline void check_multi_store(struct xarray *xa)
#endif
}
-static DEFINE_XARRAY_ALLOC(xa0);
-
-static noinline void check_xa_alloc(void)
+static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
int i;
u32 id;
- /* An empty array should assign 0 to the first alloc */
- xa_alloc_index(&xa0, 0, GFP_KERNEL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+ /* An empty array should assign %base to the first alloc */
+ xa_alloc_index(xa, base, GFP_KERNEL);
/* Erasing it should make the array empty again */
- xa_erase_index(&xa0, 0);
- XA_BUG_ON(&xa0, !xa_empty(&xa0));
+ xa_erase_index(xa, base);
+ XA_BUG_ON(xa, !xa_empty(xa));
- /* And it should assign 0 again */
- xa_alloc_index(&xa0, 0, GFP_KERNEL);
+ /* And it should assign %base again */
+ xa_alloc_index(xa, base, GFP_KERNEL);
- /* The next assigned ID should be 1 */
- xa_alloc_index(&xa0, 1, GFP_KERNEL);
- xa_erase_index(&xa0, 1);
+ /* Allocating and then erasing a lot should not lose base */
+ for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
+ xa_alloc_index(xa, i, GFP_KERNEL);
+ for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
+ xa_erase_index(xa, i);
+ xa_alloc_index(xa, base, GFP_KERNEL);
+
+ /* Destroying the array should do the same as erasing */
+ xa_destroy(xa);
+
+ /* And it should assign %base again */
+ xa_alloc_index(xa, base, GFP_KERNEL);
+
+ /* The next assigned ID should be base+1 */
+ xa_alloc_index(xa, base + 1, GFP_KERNEL);
+ xa_erase_index(xa, base + 1);
/* Storing a value should mark it used */
- xa_store_index(&xa0, 1, GFP_KERNEL);
- xa_alloc_index(&xa0, 2, GFP_KERNEL);
+ xa_store_index(xa, base + 1, GFP_KERNEL);
+ xa_alloc_index(xa, base + 2, GFP_KERNEL);
- /* If we then erase 0, it should be free */
- xa_erase_index(&xa0, 0);
- xa_alloc_index(&xa0, 0, GFP_KERNEL);
+ /* If we then erase base, it should be free */
+ xa_erase_index(xa, base);
+ xa_alloc_index(xa, base, GFP_KERNEL);
- xa_erase_index(&xa0, 1);
- xa_erase_index(&xa0, 2);
+ xa_erase_index(xa, base + 1);
+ xa_erase_index(xa, base + 2);
for (i = 1; i < 5000; i++) {
- xa_alloc_index(&xa0, i, GFP_KERNEL);
+ xa_alloc_index(xa, base + i, GFP_KERNEL);
}
- xa_destroy(&xa0);
+ xa_destroy(xa);
- id = 0xfffffffeU;
- XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
+ /* Check that we fail properly at the limit of allocation */
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
+ XA_LIMIT(UINT_MAX - 1, UINT_MAX),
GFP_KERNEL) != 0);
- XA_BUG_ON(&xa0, id != 0xfffffffeU);
- XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
+ XA_BUG_ON(xa, id != 0xfffffffeU);
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
+ XA_LIMIT(UINT_MAX - 1, UINT_MAX),
GFP_KERNEL) != 0);
- XA_BUG_ON(&xa0, id != 0xffffffffU);
- XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
- GFP_KERNEL) != -ENOSPC);
- XA_BUG_ON(&xa0, id != 0xffffffffU);
- xa_destroy(&xa0);
-
- id = 10;
- XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
- GFP_KERNEL) != -ENOSPC);
- XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0);
- XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
- GFP_KERNEL) != -ENOSPC);
- xa_erase_index(&xa0, 3);
- XA_BUG_ON(&xa0, !xa_empty(&xa0));
+ XA_BUG_ON(xa, id != 0xffffffffU);
+ id = 3;
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
+ XA_LIMIT(UINT_MAX - 1, UINT_MAX),
+ GFP_KERNEL) != -EBUSY);
+ XA_BUG_ON(xa, id != 3);
+ xa_destroy(xa);
+
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
+ GFP_KERNEL) != -EBUSY);
+ XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
+ GFP_KERNEL) != -EBUSY);
+ xa_erase_index(xa, 3);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
+{
+ unsigned int i, id;
+ unsigned long index;
+ void *entry;
+
+ /* Allocate and free a NULL and check xa_empty() behaves */
+ XA_BUG_ON(xa, !xa_empty(xa));
+ XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != base);
+ XA_BUG_ON(xa, xa_empty(xa));
+ XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* Ditto, but check destroy instead of erase */
+ XA_BUG_ON(xa, !xa_empty(xa));
+ XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != base);
+ XA_BUG_ON(xa, xa_empty(xa));
+ xa_destroy(xa);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (i = base; i < base + 10; i++) {
+ XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
+ GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != i);
+ }
+
+ XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
+ XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
+ XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 5);
+
+ xa_for_each(xa, index, entry) {
+ xa_erase_index(xa, index);
+ }
+
+ for (i = base; i < base + 9; i++) {
+ XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ }
+ XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
+ XA_BUG_ON(xa, xa_empty(xa));
+ XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ xa_destroy(xa);
+}
+
+static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
+{
+ struct xa_limit limit = XA_LIMIT(1, 0x3fff);
+ u32 next = 0;
+ unsigned int i, id;
+ unsigned long index;
+ void *entry;
+
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
+ &next, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 1);
+
+ next = 0x3ffd;
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
+ &next, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != 0x3ffd);
+ xa_erase_index(xa, 0x3ffd);
+ xa_erase_index(xa, 1);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (i = 0x3ffe; i < 0x4003; i++) {
+ if (i < 0x4000)
+ entry = xa_mk_index(i);
+ else
+ entry = xa_mk_index(i - 0x3fff);
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
+ &next, GFP_KERNEL) != (id == 1));
+ XA_BUG_ON(xa, xa_mk_index(id) != entry);
+ }
+
+ /* Check wrap-around is handled correctly */
+ if (base != 0)
+ xa_erase_index(xa, base);
+ xa_erase_index(xa, base + 1);
+ next = UINT_MAX;
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
+ xa_limit_32b, &next, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != UINT_MAX);
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
+ xa_limit_32b, &next, GFP_KERNEL) != 1);
+ XA_BUG_ON(xa, id != base);
+ XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
+ xa_limit_32b, &next, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != base + 1);
+
+ xa_for_each(xa, index, entry)
+ xa_erase_index(xa, index);
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static DEFINE_XARRAY_ALLOC(xa0);
+static DEFINE_XARRAY_ALLOC1(xa1);
+
+static noinline void check_xa_alloc(void)
+{
+ check_xa_alloc_1(&xa0, 0);
+ check_xa_alloc_1(&xa1, 1);
+ check_xa_alloc_2(&xa0, 0);
+ check_xa_alloc_2(&xa1, 1);
+ check_xa_alloc_3(&xa0, 0);
+ check_xa_alloc_3(&xa1, 1);
}
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
@@ -812,17 +966,16 @@ static noinline void check_find_1(struct xarray *xa)
static noinline void check_find_2(struct xarray *xa)
{
void *entry;
- unsigned long i, j, index = 0;
+ unsigned long i, j, index;
- xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+ xa_for_each(xa, index, entry) {
XA_BUG_ON(xa, true);
}
for (i = 0; i < 1024; i++) {
xa_store_index(xa, index, GFP_KERNEL);
j = 0;
- index = 0;
- xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+ xa_for_each(xa, index, entry) {
XA_BUG_ON(xa, xa_mk_index(index) != entry);
XA_BUG_ON(xa, index != j++);
}
@@ -839,6 +992,7 @@ static noinline void check_find_3(struct xarray *xa)
for (i = 0; i < 100; i++) {
for (j = 0; j < 100; j++) {
+ rcu_read_lock();
for (k = 0; k < 100; k++) {
xas_set(&xas, j);
xas_for_each_marked(&xas, entry, k, XA_MARK_0)
@@ -847,6 +1001,7 @@ static noinline void check_find_3(struct xarray *xa)
XA_BUG_ON(xa,
xas.xa_node != XAS_RESTART);
}
+ rcu_read_unlock();
}
xa_store_index(xa, i, GFP_KERNEL);
xa_set_mark(xa, i, XA_MARK_0);
@@ -1183,6 +1338,58 @@ static noinline void check_store_range(struct xarray *xa)
}
}
+static void check_align_1(struct xarray *xa, char *name)
+{
+ int i;
+ unsigned int id;
+ unsigned long index;
+ void *entry;
+
+ for (i = 0; i < 8; i++) {
+ XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
+ GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, id != i);
+ }
+ xa_for_each(xa, index, entry)
+ XA_BUG_ON(xa, xa_is_err(entry));
+ xa_destroy(xa);
+}
+
+/*
+ * We should always be able to store without allocating memory after
+ * reserving a slot.
+ */
+static void check_align_2(struct xarray *xa, char *name)
+{
+ int i;
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ for (i = 0; i < 8; i++) {
+ XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
+ xa_erase(xa, 0);
+ }
+
+ for (i = 0; i < 8; i++) {
+ XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
+ XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
+ xa_erase(xa, 0);
+ }
+
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_align(struct xarray *xa)
+{
+ char name[] = "Motorola 68000";
+
+ check_align_1(xa, name);
+ check_align_1(xa, name + 1);
+ check_align_1(xa, name + 2);
+ check_align_1(xa, name + 3);
+ check_align_2(xa, name);
+}
+
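
check_align_2() above leans on the guarantee that a slot created by xa_reserve() can later be filled without allocating, which is why its stores pass a gfp mask of 0. A sketch of that reserve-then-store pattern, with illustrative names ('map', publish_later()):

	#include <linux/xarray.h>

	static DEFINE_XARRAY(map);	/* illustrative */

	static int publish_later(unsigned long index, void *ptr)
	{
		int err = xa_reserve(&map, index, GFP_KERNEL);	/* may allocate */

		if (err)
			return err;
		/* The slot now exists, so the store needs no memory: gfp of 0 is fine. */
		xa_store(&map, index, ptr, 0);
		return 0;
	}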
static LIST_HEAD(shadow_nodes);
static void test_update_node(struct xa_node *node)
@@ -1322,6 +1529,7 @@ static int xarray_checks(void)
check_xas_erase(&array);
check_cmpxchg(&array);
check_reserve(&array);
+ check_reserve(&xa0);
check_multi_store(&array);
check_xa_alloc();
check_find(&array);
@@ -1332,6 +1540,7 @@ static int xarray_checks(void)
check_create_range(&array);
check_store_range(&array);
check_store_iter(&array);
+ check_align(&xa0);
check_workingset(&array, 0);
check_workingset(&array, 64);
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 3744b2a8e591..c2bfbcaeb3dc 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -8,7 +8,7 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
{
unsigned long res = n;
might_fault();
- if (likely(access_ok(VERIFY_READ, from, n))) {
+ if (likely(access_ok(from, n))) {
kasan_check_write(to, n);
res = raw_copy_from_user(to, from, n);
}
@@ -23,7 +23,7 @@ EXPORT_SYMBOL(_copy_from_user);
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
- if (likely(access_ok(VERIFY_WRITE, to, n))) {
+ if (likely(access_ok(to, n))) {
kasan_check_read(from, n);
n = raw_copy_to_user(to, from, n);
}
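
These hunks follow the interface change that dropped access_ok()'s first argument; the old VERIFY_READ/VERIFY_WRITE value was unused, so callers now pass only the user pointer and length. A hedged caller sketch (read_user_buf() is illustrative; copy_from_user() performs its own check, the explicit call is only there to show the new form):

	#include <linux/uaccess.h>

	static int read_user_buf(void *dst, const void __user *src,
				 unsigned long len)
	{
		/* Before this series: access_ok(VERIFY_READ, src, len) */
		if (!access_ok(src, len))
			return -EFAULT;
		return copy_from_user(dst, src, len) ? -EFAULT : 0;
	}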
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 37a54a6dd594..791b6fa36905 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -17,6 +17,7 @@
*/
#include <stdarg.h>
+#include <linux/build_bug.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h> /* for KSYM_SYMBOL_LEN */
@@ -30,6 +31,7 @@
#include <linux/ioport.h>
#include <linux/dcache.h>
#include <linux/cred.h>
+#include <linux/rtc.h>
#include <linux/uuid.h>
#include <linux/of.h>
#include <net/addrconf.h>
@@ -404,6 +406,8 @@ struct printf_spec {
unsigned int base:8; /* number base, 8, 10 or 16 only */
signed int precision:16; /* # of digits/chars */
} __packed;
+static_assert(sizeof(struct printf_spec) == 8);
+
#define FIELD_WIDTH_MAX ((1 << 23) - 1)
#define PRECISION_MAX ((1 << 15) - 1)
@@ -421,8 +425,6 @@ char *number(char *buf, char *end, unsigned long long num,
int field_width = spec.field_width;
int precision = spec.precision;
- BUILD_BUG_ON(sizeof(struct printf_spec) != 8);
-
/* locase = 0 or 0x20. ORing digits or letters with 'locase'
* produces same digits or (maybe lowercased) letters */
locase = (spec.flags & SMALL);
@@ -822,6 +824,20 @@ static const struct printf_spec default_dec_spec = {
.precision = -1,
};
+static const struct printf_spec default_dec02_spec = {
+ .base = 10,
+ .field_width = 2,
+ .precision = -1,
+ .flags = ZEROPAD,
+};
+
+static const struct printf_spec default_dec04_spec = {
+ .base = 10,
+ .field_width = 4,
+ .precision = -1,
+ .flags = ZEROPAD,
+};
+
static noinline_for_stack
char *resource_string(char *buf, char *end, struct resource *res,
struct printf_spec spec, const char *fmt)
@@ -1550,6 +1566,87 @@ char *address_val(char *buf, char *end, const void *addr, const char *fmt)
}
static noinline_for_stack
+char *date_str(char *buf, char *end, const struct rtc_time *tm, bool r)
+{
+ int year = tm->tm_year + (r ? 0 : 1900);
+ int mon = tm->tm_mon + (r ? 0 : 1);
+
+ buf = number(buf, end, year, default_dec04_spec);
+ if (buf < end)
+ *buf = '-';
+ buf++;
+
+ buf = number(buf, end, mon, default_dec02_spec);
+ if (buf < end)
+ *buf = '-';
+ buf++;
+
+ return number(buf, end, tm->tm_mday, default_dec02_spec);
+}
+
+static noinline_for_stack
+char *time_str(char *buf, char *end, const struct rtc_time *tm, bool r)
+{
+ buf = number(buf, end, tm->tm_hour, default_dec02_spec);
+ if (buf < end)
+ *buf = ':';
+ buf++;
+
+ buf = number(buf, end, tm->tm_min, default_dec02_spec);
+ if (buf < end)
+ *buf = ':';
+ buf++;
+
+ return number(buf, end, tm->tm_sec, default_dec02_spec);
+}
+
+static noinline_for_stack
+char *rtc_str(char *buf, char *end, const struct rtc_time *tm, const char *fmt)
+{
+ bool have_t = true, have_d = true;
+ bool raw = false;
+ int count = 2;
+
+ switch (fmt[count]) {
+ case 'd':
+ have_t = false;
+ count++;
+ break;
+ case 't':
+ have_d = false;
+ count++;
+ break;
+ }
+
+ raw = fmt[count] == 'r';
+
+ if (have_d)
+ buf = date_str(buf, end, tm, raw);
+ if (have_d && have_t) {
+ /* Respect ISO 8601 */
+ if (buf < end)
+ *buf = 'T';
+ buf++;
+ }
+ if (have_t)
+ buf = time_str(buf, end, tm, raw);
+
+ return buf;
+}
+
+static noinline_for_stack
+char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec,
+ const char *fmt)
+{
+ switch (fmt[1]) {
+ case 'R':
+ return rtc_str(buf, end, (const struct rtc_time *)ptr, fmt);
+ default:
+ return ptr_to_id(buf, end, ptr, spec);
+ }
+}
+
+static noinline_for_stack
char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
const char *fmt)
{
@@ -1828,11 +1925,12 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
* - 'd[234]' For a dentry name (optionally 2-4 last components)
* - 'D[234]' Same as 'd' but for a struct file
* - 'g' For block_device name (gendisk + partition number)
+ * - 't[R][dt][r]' For time and date as represented:
+ * R struct rtc_time
* - 'C' For a clock, it prints the name (Common Clock Framework) or address
* (legacy clock framework) of the clock
* - 'Cn' For a clock, it prints the name (Common Clock Framework) or address
* (legacy clock framework) of the clock
- * - 'Cr' For a clock, it prints the current rate of the clock
* - 'G' For flags to be printed as a collection of symbolic strings that would
* construct the specific value. Supported flags given by option:
* p page flags (see struct page) given as pointer to unsigned long
@@ -1952,6 +2050,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return address_val(buf, end, ptr, fmt);
case 'd':
return dentry_name(buf, end, ptr, spec, fmt);
+ case 't':
+ return time_and_date(buf, end, ptr, spec, fmt);
case 'C':
return clock(buf, end, ptr, spec, fmt);
case 'D':
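
The new 't' extension prints a struct rtc_time via %ptR: a 'd' or 't' after the R restricts output to the date or time half, and a trailing 'r' prints the raw, unadjusted tm_year/tm_mon values. A usage sketch assuming those semantics (the rtc_time values are illustrative):

	#include <linux/printk.h>
	#include <linux/rtc.h>

	static void print_rtc_example(void)
	{
		/* 2019-01-29 12:34:56 — tm_year counts from 1900, tm_mon from 0 */
		struct rtc_time tm = {
			.tm_year = 119, .tm_mon = 0, .tm_mday = 29,
			.tm_hour = 12, .tm_min = 34, .tm_sec = 56,
		};

		pr_info("%ptR\n", &tm);		/* "2019-01-29T12:34:56" */
		pr_info("%ptRd\n", &tm);	/* "2019-01-29" (date only) */
		pr_info("%ptRt\n", &tm);	/* "12:34:56" (time only) */
		pr_info("%ptRr\n", &tm);	/* "0119-00-29T12:34:56" (raw fields) */
	}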
diff --git a/lib/xarray.c b/lib/xarray.c
index 5f3f9311de89..6be3acbb861f 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -57,6 +57,11 @@ static inline bool xa_track_free(const struct xarray *xa)
return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}
+static inline bool xa_zero_busy(const struct xarray *xa)
+{
+ return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
+}
+
static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
@@ -232,6 +237,8 @@ void *xas_load(struct xa_state *xas)
if (xas->xa_shift > node->shift)
break;
entry = xas_descend(xas, node);
+ if (node->shift == 0)
+ break;
}
return entry;
}
@@ -430,6 +437,8 @@ static void xas_shrink(struct xa_state *xas)
break;
if (!xa_is_node(entry) && node->shift)
break;
+ if (xa_is_zero(entry) && xa_zero_busy(xa))
+ entry = NULL;
xas->xa_node = XAS_BOUNDS;
RCU_INIT_POINTER(xa->xa_head, entry);
@@ -506,7 +515,7 @@ static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
for (;;) {
void *entry = xa_entry_locked(xas->xa, node, offset);
- if (xa_is_node(entry)) {
+ if (node->shift && xa_is_node(entry)) {
node = xa_to_node(entry);
offset = 0;
continue;
@@ -604,6 +613,7 @@ static int xas_expand(struct xa_state *xas, void *head)
/*
* xas_create() - Create a slot to store an entry in.
* @xas: XArray operation state.
+ * @allow_root: %true if we can store the entry in the root directly
*
* Most users will not need to call this function directly, as it is called
* by xas_store(). It is useful for doing conditional store operations
@@ -613,7 +623,7 @@ static int xas_expand(struct xa_state *xas, void *head)
* If the slot was newly created, returns %NULL. If it failed to create the
* slot, returns %NULL and indicates the error in @xas.
*/
-static void *xas_create(struct xa_state *xas)
+static void *xas_create(struct xa_state *xas, bool allow_root)
{
struct xarray *xa = xas->xa;
void *entry;
@@ -625,9 +635,13 @@ static void *xas_create(struct xa_state *xas)
if (xas_top(node)) {
entry = xa_head_locked(xa);
xas->xa_node = NULL;
+ if (!entry && xa_zero_busy(xa))
+ entry = XA_ZERO_ENTRY;
shift = xas_expand(xas, entry);
if (shift < 0)
return NULL;
+ if (!shift && !allow_root)
+ shift = XA_CHUNK_SHIFT;
entry = xa_head_locked(xa);
slot = &xa->xa_head;
} else if (xas_error(xas)) {
@@ -687,7 +701,7 @@ void xas_create_range(struct xa_state *xas)
xas->xa_sibs = 0;
for (;;) {
- xas_create(xas);
+ xas_create(xas, true);
if (xas_error(xas))
goto restore;
if (xas->xa_index <= (index | XA_CHUNK_MASK))
@@ -753,10 +767,12 @@ void *xas_store(struct xa_state *xas, void *entry)
void *first, *next;
bool value = xa_is_value(entry);
- if (entry)
- first = xas_create(xas);
- else
+ if (entry) {
+ bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
+ first = xas_create(xas, allow_root);
+ } else {
first = xas_load(xas);
+ }
if (xas_invalid(xas))
return first;
@@ -786,7 +802,7 @@ void *xas_store(struct xa_state *xas, void *entry)
* entry is set to NULL.
*/
rcu_assign_pointer(*slot, entry);
- if (xa_is_node(next))
+ if (xa_is_node(next) && (!node || node->shift))
xas_free_nodes(xas, xa_to_node(next));
if (!node)
break;
@@ -1251,35 +1267,6 @@ void *xas_find_conflict(struct xa_state *xas)
EXPORT_SYMBOL_GPL(xas_find_conflict);
/**
- * xa_init_flags() - Initialise an empty XArray with flags.
- * @xa: XArray.
- * @flags: XA_FLAG values.
- *
- * If you need to initialise an XArray with special flags (eg you need
- * to take the lock from interrupt context), use this function instead
- * of xa_init().
- *
- * Context: Any context.
- */
-void xa_init_flags(struct xarray *xa, gfp_t flags)
-{
- unsigned int lock_type;
- static struct lock_class_key xa_lock_irq;
- static struct lock_class_key xa_lock_bh;
-
- spin_lock_init(&xa->xa_lock);
- xa->xa_flags = flags;
- xa->xa_head = NULL;
-
- lock_type = xa_lock_type(xa);
- if (lock_type == XA_LOCK_IRQ)
- lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
- else if (lock_type == XA_LOCK_BH)
- lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
-}
-EXPORT_SYMBOL(xa_init_flags);
-
-/**
* xa_load() - Load an entry from an XArray.
* @xa: XArray.
* @index: index into array.
@@ -1308,7 +1295,6 @@ static void *xas_result(struct xa_state *xas, void *curr)
{
if (xa_is_zero(curr))
return NULL;
- XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
if (xas_error(xas))
curr = xas->xa_node;
return curr;
@@ -1319,13 +1305,12 @@ static void *xas_result(struct xa_state *xas, void *curr)
* @xa: XArray.
* @index: Index into array.
*
- * If the entry at this index is a multi-index entry then all indices will
- * be erased, and the entry will no longer be a multi-index entry.
- * This function expects the xa_lock to be held on entry.
+ * After this function returns, loading from @index will return %NULL.
+ * If the index is part of a multi-index entry, all indices will be erased
+ * and none of the entries will be part of a multi-index entry.
*
- * Context: Any context. Expects xa_lock to be held on entry. May
- * release and reacquire xa_lock if @gfp flags permit.
- * Return: The old entry at this index.
+ * Context: Any context. Expects xa_lock to be held on entry.
+ * Return: The entry which used to be at this index.
*/
void *__xa_erase(struct xarray *xa, unsigned long index)
{
@@ -1339,9 +1324,9 @@ EXPORT_SYMBOL(__xa_erase);
* @xa: XArray.
* @index: Index of entry.
*
- * This function is the equivalent of calling xa_store() with %NULL as
- * the third argument. The XArray does not need to allocate memory, so
- * the user does not need to provide GFP flags.
+ * After this function returns, loading from @index will return %NULL.
+ * If the index is part of a multi-index entry, all indices will be erased
+ * and none of the entries will be part of a multi-index entry.
*
* Context: Any context. Takes and releases the xa_lock.
* Return: The entry which used to be at this index.
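
As the reworded comment says, xa_erase() just removes and returns whatever was stored at @index; it does not free the object and needs no gfp flags. A two-line caller sketch ('cache', 'id' and 'struct object' are illustrative):

	struct object *obj = xa_erase(&cache, id);	/* NULL if the index was empty */

	kfree(obj);	/* erasing does not free the object itself */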
@@ -1378,7 +1363,7 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
XA_STATE(xas, xa, index);
void *curr;
- if (WARN_ON_ONCE(xa_is_internal(entry)))
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
return XA_ERROR(-EINVAL);
if (xa_track_free(xa) && !entry)
entry = XA_ZERO_ENTRY;
@@ -1444,18 +1429,14 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
XA_STATE(xas, xa, index);
void *curr;
- if (WARN_ON_ONCE(xa_is_internal(entry)))
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
return XA_ERROR(-EINVAL);
- if (xa_track_free(xa) && !entry)
- entry = XA_ZERO_ENTRY;
do {
curr = xas_load(&xas);
- if (curr == XA_ZERO_ENTRY)
- curr = NULL;
if (curr == old) {
xas_store(&xas, entry);
- if (xa_track_free(xa))
+ if (xa_track_free(xa) && entry && !curr)
xas_clear_mark(&xas, XA_FREE_MARK);
}
} while (__xas_nomem(&xas, gfp));
@@ -1465,40 +1446,45 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
EXPORT_SYMBOL(__xa_cmpxchg);
/**
- * __xa_reserve() - Reserve this index in the XArray.
+ * __xa_insert() - Store this entry in the XArray if no entry is present.
* @xa: XArray.
* @index: Index into array.
+ * @entry: New entry.
* @gfp: Memory allocation flags.
*
- * Ensures there is somewhere to store an entry at @index in the array.
- * If there is already something stored at @index, this function does
- * nothing. If there was nothing there, the entry is marked as reserved.
- * Loading from a reserved entry returns a %NULL pointer.
- *
- * If you do not use the entry that you have reserved, call xa_release()
- * or xa_erase() to free any unnecessary memory.
+ * Inserting a NULL entry will store a reserved entry (like xa_reserve())
+ * if no entry is present. Inserting will fail if a reserved entry is
+ * present, even though loading from this index will return NULL.
*
- * Context: Any context. Expects the xa_lock to be held on entry. May
- * release the lock, sleep and reacquire the lock if the @gfp flags permit.
- * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 if the store succeeded. -EBUSY if another entry was present.
+ * -ENOMEM if memory could not be allocated.
*/
-int __xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
+int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
XA_STATE(xas, xa, index);
void *curr;
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
+ return -EINVAL;
+ if (!entry)
+ entry = XA_ZERO_ENTRY;
+
do {
curr = xas_load(&xas);
if (!curr) {
- xas_store(&xas, XA_ZERO_ENTRY);
+ xas_store(&xas, entry);
if (xa_track_free(xa))
xas_clear_mark(&xas, XA_FREE_MARK);
+ } else {
+ xas_set_err(&xas, -EBUSY);
}
} while (__xas_nomem(&xas, gfp));
return xas_error(&xas);
}
-EXPORT_SYMBOL(__xa_reserve);
+EXPORT_SYMBOL(__xa_insert);
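
Per the kernel-doc above, __xa_insert() succeeds only when the index is completely empty (a reserved entry counts as occupied), and a NULL @entry behaves like a reservation. A sketch of the locked calling pattern (publish_once() is illustrative):

	#include <linux/xarray.h>

	static int publish_once(struct xarray *xa, unsigned long index, void *ptr)
	{
		int err;

		xa_lock(xa);
		err = __xa_insert(xa, index, ptr, GFP_KERNEL);
		xa_unlock(xa);

		return err;	/* 0, -EBUSY if occupied or reserved, -ENOMEM on OOM */
	}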
#ifdef CONFIG_XARRAY_MULTI
static void xas_set_range(struct xa_state *xas, unsigned long first,
@@ -1567,7 +1553,7 @@ void *xa_store_range(struct xarray *xa, unsigned long first,
if (last + 1)
order = __ffs(last + 1);
xas_set_order(&xas, last, order);
- xas_create(&xas);
+ xas_create(&xas, true);
if (xas_error(&xas))
goto unlock;
}
@@ -1591,25 +1577,25 @@ EXPORT_SYMBOL(xa_store_range);
* __xa_alloc() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
- * @max: Maximum ID to allocate (inclusive).
+ * @limit: Range for allocated ID.
* @entry: New entry.
* @gfp: Memory allocation flags.
*
- * Allocates an unused ID in the range specified by @id and @max.
- * Updates the @id pointer with the index, then stores the entry at that
- * index. A concurrent lookup will not see an uninitialised @id.
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
*
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
- * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
- * there is no more space in the XArray.
+ * Return: 0 on success, -ENOMEM if memory could not be allocated or
+ * -EBUSY if there are no free entries in @limit.
*/
-int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
+int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
+ struct xa_limit limit, gfp_t gfp)
{
XA_STATE(xas, xa, 0);
- int err;
- if (WARN_ON_ONCE(xa_is_internal(entry)))
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
if (WARN_ON_ONCE(!xa_track_free(xa)))
return -EINVAL;
@@ -1618,22 +1604,71 @@ int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
entry = XA_ZERO_ENTRY;
do {
- xas.xa_index = *id;
- xas_find_marked(&xas, max, XA_FREE_MARK);
+ xas.xa_index = limit.min;
+ xas_find_marked(&xas, limit.max, XA_FREE_MARK);
if (xas.xa_node == XAS_RESTART)
- xas_set_err(&xas, -ENOSPC);
+ xas_set_err(&xas, -EBUSY);
+ else
+ *id = xas.xa_index;
xas_store(&xas, entry);
xas_clear_mark(&xas, XA_FREE_MARK);
} while (__xas_nomem(&xas, gfp));
- err = xas_error(&xas);
- if (!err)
- *id = xas.xa_index;
- return err;
+ return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_alloc);
/**
+ * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping. 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
+ struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+ u32 min = limit.min;
+ int ret;
+
+ limit.min = max(min, *next);
+ ret = __xa_alloc(xa, id, entry, limit, gfp);
+ if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
+ xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
+ ret = 1;
+ }
+
+ if (ret < 0 && limit.min > min) {
+ limit.min = min;
+ ret = __xa_alloc(xa, id, entry, limit, gfp);
+ if (ret == 0)
+ ret = 1;
+ }
+
+ if (ret >= 0) {
+ *next = *id + 1;
+ if (*next == 0)
+ xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(__xa_alloc_cyclic);
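
A sketch of the cyclic allocation pattern this implements: the caller keeps its own 'next' cursor, and a return value of 1 only reports that the search wrapped (the 'struct id_space' wrapper and the limit are illustrative):

	#include <linux/xarray.h>

	struct id_space {
		struct xarray ids;	/* must be initialised with XA_FLAGS_ALLOC */
		u32 next;		/* cursor: where the next search starts */
	};

	static int id_space_get(struct id_space *s, void *ptr, u32 *id)
	{
		int ret;

		xa_lock(&s->ids);
		ret = __xa_alloc_cyclic(&s->ids, id, ptr, XA_LIMIT(1, 0xffff),
					&s->next, GFP_KERNEL);
		xa_unlock(&s->ids);

		if (ret < 0)
			return ret;	/* -ENOMEM, or -EBUSY when the range is full */
		return 0;		/* ret == 1 only means the search wrapped */
	}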
+
+/**
* __xa_set_mark() - Set this mark on this entry while locked.
* @xa: XArray.
* @index: Index of entry.
@@ -1927,6 +1962,8 @@ void xa_destroy(struct xarray *xa)
entry = xa_head_locked(xa);
RCU_INIT_POINTER(xa->xa_head, NULL);
xas_init_marks(&xas);
+ if (xa_zero_busy(xa))
+ xa_mark_clear(xa, XA_FREE_MARK);
/* lockdep checks we're still holding the lock in xas_free_nodes() */
if (xa_is_node(entry))
xas_free_nodes(&xas, xa_to_node(entry));