Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig              |   2
-rw-r--r--  lib/Kconfig.debug        | 466
-rw-r--r--  lib/Kconfig.kasan        |  16
-rw-r--r--  lib/Kconfig.kgdb         |   8
-rw-r--r--  lib/Makefile             |   4
-rw-r--r--  lib/bitmap.c             |  12
-rw-r--r--  lib/devres.c             |   2
-rw-r--r--  lib/fdt_addresses.c      |   2
-rw-r--r--  lib/find_bit.c           |  14
-rw-r--r--  lib/genalloc.c           |   7
-rw-r--r--  lib/iov_iter.c           |   3
-rw-r--r--  lib/logic_pio.c          |  14
-rw-r--r--  lib/math/rational.c      |  63
-rw-r--r--  lib/raid6/unroll.awk     |   2
-rw-r--r--  lib/sbitmap.c            |   2
-rw-r--r--  lib/strncpy_from_user.c  |  14
-rw-r--r--  lib/strnlen_user.c       |  14
-rw-r--r--  lib/test_bitmap.c        | 202
-rw-r--r--  lib/test_kasan.c         |  26
-rw-r--r--  lib/test_meminit.c       |  20
-rw-r--r--  lib/test_xarray.c        |  78
-rw-r--r--  lib/ubsan.c              |  64
-rw-r--r--  lib/vdso/gettimeofday.c  |   1
-rw-r--r--  lib/vsprintf.c           |  40
-rw-r--r--  lib/xarray.c             |  41
25 files changed, 716 insertions, 401 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 6d7c5877c9f1..6e790dc55c5b 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -572,7 +572,7 @@ config OID_REGISTRY
Enable fast lookup object identifier registry.
config UCS2_STRING
- tristate
+ tristate
#
# generic vdso
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ab9a8efec9b3..5ffe144c9794 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -128,8 +128,8 @@ config DYNAMIC_DEBUG
lineno : line number of the debug statement
module : module that contains the debug statement
function : function that contains the debug statement
- flags : '=p' means the line is turned 'on' for printing
- format : the format used for the debug statement
+ flags : '=p' means the line is turned 'on' for printing
+ format : the format used for the debug statement
From a live system:
@@ -173,6 +173,15 @@ config SYMBOLIC_ERRNAME
of the number 28. It makes the kernel image slightly larger
(about 3KB), but can make the kernel logs easier to read.
+config DEBUG_BUGVERBOSE
+ bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
+ depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE)
+ default y
+ help
+ Say Y here to make BUG() panics output the file name and line number
+ of the BUG call as well as the EIP and oops trace. This aids
+ debugging but costs about 70-100K of memory.
+
endmenu # "printk and dmesg options"
menu "Compile-time checks and compiler options"
@@ -181,7 +190,7 @@ config DEBUG_INFO
bool "Compile the kernel with debug info"
depends on DEBUG_KERNEL && !COMPILE_TEST
help
- If you say Y here the resulting kernel image will include
+ If you say Y here the resulting kernel image will include
debugging info resulting in a larger kernel image.
This adds debug symbols to the kernel and modules (gcc -g), and
is needed if you intend to use kernel crashdump or binary object
@@ -278,25 +287,13 @@ config STRIP_ASM_SYMS
get_wchan() and suchlike.
config READABLE_ASM
- bool "Generate readable assembler code"
- depends on DEBUG_KERNEL
- help
- Disable some compiler optimizations that tend to generate human unreadable
- assembler output. This may make the kernel slightly slower, but it helps
- to keep kernel developers who have to stare a lot at assembler listings
- sane.
-
-config DEBUG_FS
- bool "Debug Filesystem"
+ bool "Generate readable assembler code"
+ depends on DEBUG_KERNEL
help
- debugfs is a virtual file system that kernel developers use to put
- debugging files into. Enable this option to be able to read and
- write to these files.
-
- For detailed documentation on the debugfs API, see
- Documentation/filesystems/.
-
- If unsure, say N.
+ Disable some compiler optimizations that tend to generate human unreadable
+ assembler output. This may make the kernel slightly slower, but it helps
+ to keep kernel developers who have to stare a lot at assembler listings
+ sane.
config HEADERS_INSTALL
bool "Install uapi headers to usr/include"
@@ -308,17 +305,6 @@ config HEADERS_INSTALL
user-space program samples. It is also needed by some features such
as uapi header sanity checks.
-config HEADERS_CHECK
- bool "Run sanity checks on uapi headers when building 'all'"
- depends on HEADERS_INSTALL
- help
- This option will run basic sanity checks on uapi headers when
- building the 'all' target, for example, ensure that they do not
- attempt to include files which were not exported, etc.
-
- If you're making modifications to header files which are
- relevant for userspace, say 'Y'.
-
config OPTIMIZE_INLINING
def_bool y
help
@@ -410,6 +396,8 @@ config DEBUG_FORCE_WEAK_PER_CPU
endmenu # "Compiler options"
+menu "Generic Kernel Debugging Instruments"
+
config MAGIC_SYSRQ
bool "Magic SysRq key"
depends on !UML
@@ -443,6 +431,24 @@ config MAGIC_SYSRQ_SERIAL
This option allows you to decide whether you want to enable the
magic SysRq key.
+config DEBUG_FS
+ bool "Debug Filesystem"
+ help
+ debugfs is a virtual file system that kernel developers use to put
+ debugging files into. Enable this option to be able to read and
+ write to these files.
+
+ For detailed documentation on the debugfs API, see
+ Documentation/filesystems/.
+
+ If unsure, say N.
+
+source "lib/Kconfig.kgdb"
+
+source "lib/Kconfig.ubsan"
+
+endmenu
+
config DEBUG_KERNEL
bool "Kernel debugging"
help
@@ -517,11 +523,11 @@ config DEBUG_OBJECTS_PERCPU_COUNTER
config DEBUG_OBJECTS_ENABLE_DEFAULT
int "debug_objects bootup default value (0-1)"
- range 0 1
- default "1"
- depends on DEBUG_OBJECTS
- help
- Debug objects boot parameter default value
+ range 0 1
+ default "1"
+ depends on DEBUG_OBJECTS
+ help
+ Debug objects boot parameter default value
config DEBUG_SLAB
bool "Debug slab memory allocations"
@@ -635,12 +641,24 @@ config DEBUG_STACK_USAGE
This option will slow down process creation somewhat.
+config SCHED_STACK_END_CHECK
+ bool "Detect stack corruption on calls to schedule()"
+ depends on DEBUG_KERNEL
+ default n
+ help
+ This option checks for a stack overrun on calls to schedule().
+ If the stack end location is found to be overwritten, always panic, as
+ the content of the corrupted region can no longer be trusted.
+ This is to ensure no erroneous behaviour occurs which could result in
+ data corruption or a sporadic crash at a later stage once the region
+ is examined. The runtime overhead introduced is minimal.
+
config DEBUG_VM
bool "Debug VM"
depends on DEBUG_KERNEL
help
Enable this to turn on extended checks in the virtual-memory system
- that may impact performance.
+ that may impact performance.
If unsure, say N.
@@ -767,53 +785,6 @@ source "lib/Kconfig.kasan"
endmenu # "Memory Debugging"
-config ARCH_HAS_KCOV
- bool
- help
- An architecture should select this when it can successfully
- build and run with CONFIG_KCOV. This typically requires
- disabling instrumentation for some early boot code.
-
-config CC_HAS_SANCOV_TRACE_PC
- def_bool $(cc-option,-fsanitize-coverage=trace-pc)
-
-config KCOV
- bool "Code coverage for fuzzing"
- depends on ARCH_HAS_KCOV
- depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
- select DEBUG_FS
- select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
- help
- KCOV exposes kernel code coverage information in a form suitable
- for coverage-guided fuzzing (randomized testing).
-
- If RANDOMIZE_BASE is enabled, PC values will not be stable across
- different machines and across reboots. If you need stable PC values,
- disable RANDOMIZE_BASE.
-
- For more details, see Documentation/dev-tools/kcov.rst.
-
-config KCOV_ENABLE_COMPARISONS
- bool "Enable comparison operands collection by KCOV"
- depends on KCOV
- depends on $(cc-option,-fsanitize-coverage=trace-cmp)
- help
- KCOV also exposes operands of every comparison in the instrumented
- code along with operand sizes and PCs of the comparison instructions.
- These operands can be used by fuzzing engines to improve the quality
- of fuzzing coverage.
-
-config KCOV_INSTRUMENT_ALL
- bool "Instrument all code by default"
- depends on KCOV
- default y
- help
- If you are doing generic system call fuzzing (like e.g. syzkaller),
- then you will want to instrument the whole kernel and you should
- say y here. If you are doing more targeted fuzzing (like e.g.
- filesystem fuzzing with AFL) then you will want to enable coverage
- for more specific subsets of files, and should say n here.
-
config DEBUG_SHIRQ
bool "Debug shared IRQ handlers"
depends on DEBUG_KERNEL
@@ -823,7 +794,35 @@ config DEBUG_SHIRQ
Drivers ought to be able to handle interrupts coming in at those
points; some don't and need to be caught.
-menu "Debug Lockups and Hangs"
+menu "Debug Oops, Lockups and Hangs"
+
+config PANIC_ON_OOPS
+ bool "Panic on Oops"
+ help
+ Say Y here to enable the kernel to panic when it oopses. This
+ has the same effect as setting oops=panic on the kernel command
+ line.
+
+ This feature is useful to ensure that the kernel does not do
+ anything erroneous after an oops which could result in data
+ corruption or other issues.
+
+ Say N if unsure.
+
+config PANIC_ON_OOPS_VALUE
+ int
+ range 0 1
+ default 0 if !PANIC_ON_OOPS
+ default 1 if PANIC_ON_OOPS
+
+config PANIC_TIMEOUT
+ int "panic timeout"
+ default 0
+ help
+ Set the timeout value (in seconds) until a reboot occurs when
+ the kernel panics. If n = 0, then we wait forever. A timeout
+ value n > 0 will wait n seconds before rebooting, while a timeout
+ value n < 0 will reboot immediately.
config LOCKUP_DETECTOR
bool
@@ -981,33 +980,7 @@ config WQ_WATCHDOG
endmenu # "Debug lockups and hangs"
-config PANIC_ON_OOPS
- bool "Panic on Oops"
- help
- Say Y here to enable the kernel to panic when it oopses. This
- has the same effect as setting oops=panic on the kernel command
- line.
-
- This feature is useful to ensure that the kernel does not do
- anything erroneous after an oops which could result in data
- corruption or other issues.
-
- Say N if unsure.
-
-config PANIC_ON_OOPS_VALUE
- int
- range 0 1
- default 0 if !PANIC_ON_OOPS
- default 1 if PANIC_ON_OOPS
-
-config PANIC_TIMEOUT
- int "panic timeout"
- default 0
- help
- Set the timeout value (in seconds) until a reboot occurs when the
- the kernel panics. If n = 0, then we wait forever. A timeout
- value n > 0 will wait n seconds before rebooting, while a timeout
- value n < 0 will reboot immediately.
+menu "Scheduler Debugging"
config SCHED_DEBUG
bool "Collect scheduler debugging info"
@@ -1035,17 +1008,7 @@ config SCHEDSTATS
application, you can say N to avoid the very slight overhead
this adds.
-config SCHED_STACK_END_CHECK
- bool "Detect stack corruption on calls to schedule()"
- depends on DEBUG_KERNEL
- default n
- help
- This option checks for a stack overrun on calls to schedule().
- If the stack end location is found to be over written always panic as
- the content of the corrupted region can no longer be trusted.
- This is to ensure no erroneous behaviour occurs which could result in
- data corruption or a sporadic crash at a later stage once the region
- is examined. The runtime overhead introduced is minimal.
+endmenu
config DEBUG_TIMEKEEPING
bool "Enable extra timekeeping sanity checking"
@@ -1349,14 +1312,7 @@ config DEBUG_KOBJECT_RELEASE
config HAVE_DEBUG_BUGVERBOSE
bool
-config DEBUG_BUGVERBOSE
- bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
- depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE)
- default y
- help
- Say Y here to make BUG() panics output the file name and line number
- of the BUG call as well as the EIP and oops trace. This aids
- debugging but costs about 70-100K of memory.
+menu "Debug kernel data structures"
config DEBUG_LIST
bool "Debug linked list manipulation"
@@ -1397,6 +1353,18 @@ config DEBUG_NOTIFIERS
This is a relatively cheap check but if you care about maximum
performance, say N.
+config BUG_ON_DATA_CORRUPTION
+ bool "Trigger a BUG when data corruption is detected"
+ select DEBUG_LIST
+ help
+ Select this option if the kernel should BUG when it encounters
+ data corruption in kernel memory structures when they get checked
+ for validity.
+
+ If unsure, say N.
+
+endmenu
+
config DEBUG_CREDENTIALS
bool "Debug credential management"
depends on DEBUG_KERNEL
@@ -1430,7 +1398,7 @@ config DEBUG_WQ_FORCE_RR_CPU
be impacted.
config DEBUG_BLOCK_EXT_DEVT
- bool "Force extended block device numbers and spread them"
+ bool "Force extended block device numbers and spread them"
depends on DEBUG_KERNEL
depends on BLOCK
default n
@@ -1469,6 +1437,103 @@ config CPU_HOTPLUG_STATE_CONTROL
Say N if you are unsure.
+config LATENCYTOP
+ bool "Latency measuring infrastructure"
+ depends on DEBUG_KERNEL
+ depends on STACKTRACE_SUPPORT
+ depends on PROC_FS
+ select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
+ select KALLSYMS
+ select KALLSYMS_ALL
+ select STACKTRACE
+ select SCHEDSTATS
+ select SCHED_DEBUG
+ help
+ Enable this option if you want to use the LatencyTOP tool
+ to find out which userspace is blocking on what kernel operations.
+
+source "kernel/trace/Kconfig"
+
+config PROVIDE_OHCI1394_DMA_INIT
+ bool "Remote debugging over FireWire early on boot"
+ depends on PCI && X86
+ help
+ If you want to debug problems which hang or crash the kernel early
+ on boot and the crashing machine has a FireWire port, you can use
+ this feature to remotely access the memory of the crashed machine
+ over FireWire. This employs remote DMA as part of the OHCI1394
+ specification which is now the standard for FireWire controllers.
+
+ With remote DMA, you can monitor the printk buffer remotely using
+ firescope and access all memory below 4GB using fireproxy from gdb.
+ Even controlling a kernel debugger is possible using remote DMA.
+
+ Usage:
+
+ If ohci1394_dma=early is used as boot parameter, it will initialize
+ all OHCI1394 controllers which are found in the PCI config space.
+
+ As all changes to the FireWire bus such as enabling and disabling
+ devices cause a bus reset and thereby disable remote DMA for all
+ devices, be sure to have the cable plugged and FireWire enabled on
+ the debugging host before booting the debug target for debugging.
+
+ This code (~1k) is freed after boot. By then, the firewire stack
+ in charge of the OHCI-1394 controllers should be used instead.
+
+ See Documentation/debugging-via-ohci1394.txt for more information.
+
+source "samples/Kconfig"
+
+config ARCH_HAS_DEVMEM_IS_ALLOWED
+ bool
+
+config STRICT_DEVMEM
+ bool "Filter access to /dev/mem"
+ depends on MMU && DEVMEM
+ depends on ARCH_HAS_DEVMEM_IS_ALLOWED
+ default y if PPC || X86 || ARM64
+ help
+ If this option is disabled, you allow userspace (root) access to all
+ of memory, including kernel and userspace memory. Accidental
+ access to this is obviously disastrous, but specific access can
+ be used by people debugging the kernel. Note that with PAT support
+ enabled, even in this case there are restrictions on /dev/mem
+ use due to the cache aliasing requirements.
+
+ If this option is switched on, and IO_STRICT_DEVMEM=n, the /dev/mem
+ file only allows userspace access to PCI space and the BIOS code and
+ data regions. This is sufficient for dosemu and X and all common
+ users of /dev/mem.
+
+ If in doubt, say Y.
+
+config IO_STRICT_DEVMEM
+ bool "Filter I/O access to /dev/mem"
+ depends on STRICT_DEVMEM
+ help
+ If this option is disabled, you allow userspace (root) access to all
+ io-memory regardless of whether a driver is actively using that
+ range. Accidental access to this is obviously disastrous, but
+ specific access can be used by people debugging kernel drivers.
+
+ If this option is switched on, the /dev/mem file only allows
+ userspace access to *idle* io-memory ranges (see /proc/iomem). This
+ may break traditional users of /dev/mem (dosemu, legacy X, etc...)
+ if the driver using a given range cannot be disabled.
+
+ If in doubt, say Y.
+
+menu "$(SRCARCH) Debugging"
+
+source "arch/$(SRCARCH)/Kconfig.debug"
+
+endmenu
+
+menu "Kernel Testing and Coverage"
+
+source "lib/kunit/Kconfig"
+
config NOTIFIER_ERROR_INJECTION
tristate "Notifier error injection"
depends on DEBUG_KERNEL
@@ -1627,53 +1692,53 @@ config FAULT_INJECTION_STACKTRACE_FILTER
help
Provide stacktrace filter for fault-injection capabilities
-config LATENCYTOP
- bool "Latency measuring infrastructure"
- depends on DEBUG_KERNEL
- depends on STACKTRACE_SUPPORT
- depends on PROC_FS
- select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
- select KALLSYMS
- select KALLSYMS_ALL
- select STACKTRACE
- select SCHEDSTATS
- select SCHED_DEBUG
- help
- Enable this option if you want to use the LatencyTOP tool
- to find out which userspace is blocking on what kernel operations.
-
-source "kernel/trace/Kconfig"
-
-config PROVIDE_OHCI1394_DMA_INIT
- bool "Remote debugging over FireWire early on boot"
- depends on PCI && X86
+config ARCH_HAS_KCOV
+ bool
help
- If you want to debug problems which hang or crash the kernel early
- on boot and the crashing machine has a FireWire port, you can use
- this feature to remotely access the memory of the crashed machine
- over FireWire. This employs remote DMA as part of the OHCI1394
- specification which is now the standard for FireWire controllers.
+ An architecture should select this when it can successfully
+ build and run with CONFIG_KCOV. This typically requires
+ disabling instrumentation for some early boot code.
- With remote DMA, you can monitor the printk buffer remotely using
- firescope and access all memory below 4GB using fireproxy from gdb.
- Even controlling a kernel debugger is possible using remote DMA.
+config CC_HAS_SANCOV_TRACE_PC
+ def_bool $(cc-option,-fsanitize-coverage=trace-pc)
- Usage:
- If ohci1394_dma=early is used as boot parameter, it will initialize
- all OHCI1394 controllers which are found in the PCI config space.
+config KCOV
+ bool "Code coverage for fuzzing"
+ depends on ARCH_HAS_KCOV
+ depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
+ select DEBUG_FS
+ select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
+ help
+ KCOV exposes kernel code coverage information in a form suitable
+ for coverage-guided fuzzing (randomized testing).
- As all changes to the FireWire bus such as enabling and disabling
- devices cause a bus reset and thereby disable remote DMA for all
- devices, be sure to have the cable plugged and FireWire enabled on
- the debugging host before booting the debug target for debugging.
+ If RANDOMIZE_BASE is enabled, PC values will not be stable across
+ different machines and across reboots. If you need stable PC values,
+ disable RANDOMIZE_BASE.
- This code (~1k) is freed after boot. By then, the firewire stack
- in charge of the OHCI-1394 controllers should be used instead.
+ For more details, see Documentation/dev-tools/kcov.rst.
- See Documentation/debugging-via-ohci1394.txt for more information.
+config KCOV_ENABLE_COMPARISONS
+ bool "Enable comparison operands collection by KCOV"
+ depends on KCOV
+ depends on $(cc-option,-fsanitize-coverage=trace-cmp)
+ help
+ KCOV also exposes operands of every comparison in the instrumented
+ code along with operand sizes and PCs of the comparison instructions.
+ These operands can be used by fuzzing engines to improve the quality
+ of fuzzing coverage.
-source "lib/kunit/Kconfig"
+config KCOV_INSTRUMENT_ALL
+ bool "Instrument all code by default"
+ depends on KCOV
+ default y
+ help
+ If you are doing generic system call fuzzing (like e.g. syzkaller),
+ then you will want to instrument the whole kernel and you should
+ say y here. If you are doing more targeted fuzzing (like e.g.
+ filesystem fuzzing with AFL) then you will want to enable coverage
+ for more specific subsets of files, and should say n here.
menuconfig RUNTIME_TESTING_MENU
bool "Runtime Testing"
@@ -2110,62 +2175,7 @@ config MEMTEST
memtest=17, mean do 17 test patterns.
If you are unsure how to answer this question, answer N.
-config BUG_ON_DATA_CORRUPTION
- bool "Trigger a BUG when data corruption is detected"
- select DEBUG_LIST
- help
- Select this option if the kernel should BUG when it encounters
- data corruption in kernel memory structures when they get checked
- for validity.
-
- If unsure, say N.
-
-source "samples/Kconfig"
-
-source "lib/Kconfig.kgdb"
-
-source "lib/Kconfig.ubsan"
-
-config ARCH_HAS_DEVMEM_IS_ALLOWED
- bool
-
-config STRICT_DEVMEM
- bool "Filter access to /dev/mem"
- depends on MMU && DEVMEM
- depends on ARCH_HAS_DEVMEM_IS_ALLOWED
- default y if PPC || X86 || ARM64
- ---help---
- If this option is disabled, you allow userspace (root) access to all
- of memory, including kernel and userspace memory. Accidental
- access to this is obviously disastrous, but specific access can
- be used by people debugging the kernel. Note that with PAT support
- enabled, even in this case there are restrictions on /dev/mem
- use due to the cache aliasing requirements.
-
- If this option is switched on, and IO_STRICT_DEVMEM=n, the /dev/mem
- file only allows userspace access to PCI space and the BIOS code and
- data regions. This is sufficient for dosemu and X and all common
- users of /dev/mem.
-
- If in doubt, say Y.
-
-config IO_STRICT_DEVMEM
- bool "Filter I/O access to /dev/mem"
- depends on STRICT_DEVMEM
- ---help---
- If this option is disabled, you allow userspace (root) access to all
- io-memory regardless of whether a driver is actively using that
- range. Accidental access to this is obviously disastrous, but
- specific access can be used by people debugging kernel drivers.
-
- If this option is switched on, the /dev/mem file only allows
- userspace access to *idle* io-memory ranges (see /proc/iomem) This
- may break traditional users of /dev/mem (dosemu, legacy X, etc...)
- if the driver using a given range cannot be disabled.
-
- If in doubt, say Y.
-source "arch/$(SRCARCH)/Kconfig.debug"
config HYPERV_TESTING
bool "Microsoft Hyper-V driver testing"
@@ -2174,4 +2184,6 @@ config HYPERV_TESTING
help
Select this option to enable Hyper-V vmbus testing.
+endmenu # "Kernel Testing and Coverage"
+
endmenu # Kernel hacking
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 6c9682ce0254..81f5464ea9e1 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -6,6 +6,9 @@ config HAVE_ARCH_KASAN
config HAVE_ARCH_KASAN_SW_TAGS
bool
+config HAVE_ARCH_KASAN_VMALLOC
+ bool
+
config CC_HAS_KASAN_GENERIC
def_bool $(cc-option, -fsanitize=kernel-address)
@@ -142,6 +145,19 @@ config KASAN_SW_TAGS_IDENTIFY
(use-after-free or out-of-bounds) at the cost of increased
memory consumption.
+config KASAN_VMALLOC
+ bool "Back mappings in vmalloc space with real shadow memory"
+ depends on KASAN && HAVE_ARCH_KASAN_VMALLOC
+ help
+ By default, the shadow region for vmalloc space is the read-only
+ zero page. This means that KASAN cannot detect errors involving
+ vmalloc space.
+
+ Enabling this option will hook in to vmap/vmalloc and back those
+ mappings with real shadow memory allocated on demand. This allows
+ for KASAN to detect more sorts of errors (and to support vmapped
+ stacks), but at the cost of higher memory usage.
+
config TEST_KASAN
tristate "Module for testing KASAN for bug detection"
depends on m && KASAN
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index bbe397df04a3..933680b59e2d 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -64,9 +64,9 @@ config KGDB_LOW_LEVEL_TRAP
depends on X86 || MIPS
default n
help
- This will add an extra call back to kgdb for the breakpoint
- exception handler which will allow kgdb to step through a
- notify handler.
+ This will add an extra call back to kgdb for the breakpoint
+ exception handler which will allow kgdb to step through a
+ notify handler.
config KGDB_KDB
bool "KGDB_KDB: include kdb frontend for kgdb"
@@ -96,7 +96,7 @@ config KDB_DEFAULT_ENABLE
The config option merely sets the default at boot time. Both
issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or
- setting with kdb.cmd_enable=X kernel command line option will
+ setting with kdb.cmd_enable=X kernel command line option will
override the default settings.
config KDB_KEYBOARD
diff --git a/lib/Makefile b/lib/Makefile
index c2f0e2a4e4e8..c20b1debe9b4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -109,7 +109,7 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
-obj-y += logic_pio.o
+lib-y += logic_pio.o
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
@@ -223,7 +223,7 @@ KASAN_SANITIZE_stackdepot.o := n
KCOV_INSTRUMENT_stackdepot.o := n
libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
- fdt_empty_tree.o
+ fdt_empty_tree.o fdt_addresses.o
$(foreach file, $(libfdt_files), \
$(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
lib-$(CONFIG_LIBFDT) += $(libfdt_files)
diff --git a/lib/bitmap.c b/lib/bitmap.c
index f9e834841e94..4250519d7d1c 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -222,6 +222,18 @@ int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_andnot);
+void __bitmap_replace(unsigned long *dst,
+ const unsigned long *old, const unsigned long *new,
+ const unsigned long *mask, unsigned int nbits)
+{
+ unsigned int k;
+ unsigned int nr = BITS_TO_LONGS(nbits);
+
+ for (k = 0; k < nr; k++)
+ dst[k] = (old[k] & ~mask[k]) | (new[k] & mask[k]);
+}
+EXPORT_SYMBOL(__bitmap_replace);
+
int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
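
For reference, the new helper's per-word semantics: each destination word takes its masked bits from new and the rest from old. A minimal standalone C sketch (not kernel code), checked against the exp3_0_1 vector that the test_bitmap.c changes below add:

#include <stdio.h>

/* Userspace sketch of __bitmap_replace() on one 64-bit word:
 * bits selected by mask come from new, all others from old. */
static unsigned long long word_replace(unsigned long long old,
				       unsigned long long new,
				       unsigned long long mask)
{
	return (old & ~mask) | (new & mask);
}

int main(void)
{
	unsigned long long old  = 0x3333333311111111ULL;	/* exp2[0] */
	unsigned long long new  = 0xffffffff77777777ULL;	/* exp2[1] */
	unsigned long long mask = 0x008000020020212eULL;

	printf("%016llx\n", word_replace(old, new, mask));
	/* prints 33b3333311313137, matching exp3_0_1 in test_bitmap.c */
	return 0;
}
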
diff --git a/lib/devres.c b/lib/devres.c
index d6632c1ee283..f56070cf970b 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -303,7 +303,7 @@ EXPORT_SYMBOL(devm_ioport_unmap);
/*
* PCI iomap devres
*/
-#define PCIM_IOMAP_MAX PCI_ROM_RESOURCE
+#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS
struct pcim_iomap_devres {
void __iomem *table[PCIM_IOMAP_MAX];
diff --git a/lib/fdt_addresses.c b/lib/fdt_addresses.c
new file mode 100644
index 000000000000..23610bcf390b
--- /dev/null
+++ b/lib/fdt_addresses.c
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_addresses.c"
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 5c51eb45178a..e35a76b291e6 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -214,3 +214,17 @@ EXPORT_SYMBOL(find_next_bit_le);
#endif
#endif /* __BIG_ENDIAN */
+
+unsigned long find_next_clump8(unsigned long *clump, const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ offset = find_next_bit(addr, size, offset);
+ if (offset == size)
+ return size;
+
+ offset = round_down(offset, 8);
+ *clump = bitmap_get_value8(addr, offset);
+
+ return offset;
+}
+EXPORT_SYMBOL(find_next_clump8);
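
find_next_clump8() finds the next set bit, rounds its offset down to an 8-bit boundary, and returns that byte of the bitmap as the "clump"; it backs the for_each_set_clump8() iterator exercised by test_bitmap.c below. A rough single-word userspace sketch (illustrative only):

#include <stdio.h>

/* Userspace sketch of the clump-of-8 lookup on one 64-bit word. */
static unsigned long next_clump8(unsigned long *clump,
				 unsigned long long word,
				 unsigned long size, unsigned long offset)
{
	while (offset < size && !(word & (1ULL << offset)))
		offset++;		/* stands in for find_next_bit() */
	if (offset >= size)
		return size;
	offset &= ~7UL;			/* round_down(offset, 8) */
	*clump = (unsigned long)((word >> offset) & 0xff);
	return offset;
}

int main(void)
{
	unsigned long long bits = 1ULL | 1ULL << 9;	/* bits 0 and 9 set */
	unsigned long clump, start;

	start = next_clump8(&clump, bits, 64, 1);
	printf("clump at bit %lu: 0x%02lx\n", start, clump);	/* 8: 0x02 */
	return 0;
}
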
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 9fc31292cfa1..7f1244b5294a 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -472,7 +472,7 @@ void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
EXPORT_SYMBOL(gen_pool_dma_zalloc_align);
/**
- * gen_pool_free - free allocated special memory back to the pool
+ * gen_pool_free_owner - free allocated special memory back to the pool
* @pool: pool to free to
* @addr: starting address of memory to free back to pool
* @size: size in bytes of memory to free
@@ -540,7 +540,7 @@ void gen_pool_for_each_chunk(struct gen_pool *pool,
EXPORT_SYMBOL(gen_pool_for_each_chunk);
/**
- * addr_in_gen_pool - checks if an address falls within the range of a pool
+ * gen_pool_has_addr - checks if an address falls within the range of a pool
* @pool: the generic memory pool
* @start: start address
* @size: size of the region
@@ -548,7 +548,7 @@ EXPORT_SYMBOL(gen_pool_for_each_chunk);
* Check if the range of addresses falls within the specified pool. Returns
* true if the entire range is contained in the pool and false otherwise.
*/
-bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
+bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
size_t size)
{
bool found = false;
@@ -567,6 +567,7 @@ bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
rcu_read_unlock();
return found;
}
+EXPORT_SYMBOL(gen_pool_has_addr);
/**
* gen_pool_avail - get available free space of the pool
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index fb29c02c6a3c..51595bf3af85 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1222,11 +1222,12 @@ EXPORT_SYMBOL(iov_iter_discard);
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
- unsigned int p_mask = i->pipe->ring_size - 1;
unsigned long res = 0;
size_t size = i->count;
if (unlikely(iov_iter_is_pipe(i))) {
+ unsigned int p_mask = i->pipe->ring_size - 1;
+
if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
return size | i->iov_offset;
return size;
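
The point of the fix: i->pipe sits in a union that is only meaningful for pipe-backed iterators, so computing p_mask before the iov_iter_is_pipe() check dereferenced invalid state for every other iterator type. A hedged standalone sketch of the pattern (hypothetical types, not the real iov_iter layout):

#include <stdio.h>

enum iter_type { ITER_BUF, ITER_PIPE };

struct pipe { unsigned int ring_size; };

struct iter {
	enum iter_type type;
	union {				/* variant data: valid per type only */
		const char *buf;
		struct pipe *pipe;
	};
};

/* Read variant-specific state only inside the branch that has
 * established the variant, as the patched iov_iter_alignment() does. */
static unsigned int pipe_mask(const struct iter *i)
{
	if (i->type == ITER_PIPE)
		return i->pipe->ring_size - 1;	/* safe: tag checked first */
	return 0;
}

int main(void)
{
	struct pipe p = { .ring_size = 16 };
	struct iter i = { .type = ITER_PIPE, .pipe = &p };

	printf("p_mask = %u\n", pipe_mask(&i));	/* 15 */
	return 0;
}
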
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
index 905027574e5d..f511a99bb389 100644
--- a/lib/logic_pio.c
+++ b/lib/logic_pio.c
@@ -3,6 +3,7 @@
* Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
* Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
* Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
+ * Author: John Garry <john.garry@huawei.com>
*/
#define pr_fmt(fmt) "LOGIC PIO: " fmt
@@ -39,7 +40,8 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
resource_size_t iio_sz = MMIO_UPPER_LIMIT;
int ret = 0;
- if (!new_range || !new_range->fwnode || !new_range->size)
+ if (!new_range || !new_range->fwnode || !new_range->size ||
+ (new_range->flags == LOGIC_PIO_INDIRECT && !new_range->ops))
return -EINVAL;
start = new_range->hw_start;
@@ -237,7 +239,7 @@ type logic_in##bw(unsigned long addr) \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
- if (entry && entry->ops) \
+ if (entry) \
ret = entry->ops->in(entry->hostdata, \
addr, sizeof(type)); \
else \
@@ -253,7 +255,7 @@ void logic_out##bw(type value, unsigned long addr) \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
- if (entry && entry->ops) \
+ if (entry) \
entry->ops->out(entry->hostdata, \
addr, value, sizeof(type)); \
else \
@@ -261,7 +263,7 @@ void logic_out##bw(type value, unsigned long addr) \
} \
} \
\
-void logic_ins##bw(unsigned long addr, void *buffer, \
+void logic_ins##bw(unsigned long addr, void *buffer, \
unsigned int count) \
{ \
if (addr < MMIO_UPPER_LIMIT) { \
@@ -269,7 +271,7 @@ void logic_ins##bw(unsigned long addr, void *buffer, \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
- if (entry && entry->ops) \
+ if (entry) \
entry->ops->ins(entry->hostdata, \
addr, buffer, sizeof(type), count); \
else \
@@ -286,7 +288,7 @@ void logic_outs##bw(unsigned long addr, const void *buffer, \
} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
struct logic_pio_hwaddr *entry = find_io_range(addr); \
\
- if (entry && entry->ops) \
+ if (entry) \
entry->ops->outs(entry->hostdata, \
addr, buffer, sizeof(type), count); \
else \
diff --git a/lib/math/rational.c b/lib/math/rational.c
index ba7443677c90..31fb27db2deb 100644
--- a/lib/math/rational.c
+++ b/lib/math/rational.c
@@ -3,6 +3,7 @@
* rational fractions
*
* Copyright (C) 2009 emlix GmbH, Oskar Schirmer <oskar@scara.com>
+ * Copyright (C) 2019 Trent Piepho <tpiepho@gmail.com>
*
* helper functions when coping with rational numbers
*/
@@ -10,6 +11,7 @@
#include <linux/rational.h>
#include <linux/compiler.h>
#include <linux/export.h>
+#include <linux/kernel.h>
/*
* calculate best rational approximation for a given fraction
@@ -33,30 +35,65 @@ void rational_best_approximation(
unsigned long max_numerator, unsigned long max_denominator,
unsigned long *best_numerator, unsigned long *best_denominator)
{
- unsigned long n, d, n0, d0, n1, d1;
+ /* n/d is the starting rational, which is continually
+ * decreased each iteration using the Euclidean algorithm.
+ *
+ * dp is the value of d from the prior iteration.
+ *
+ * n2/d2, n1/d1, and n0/d0 are our successively more accurate
+ * approximations of the rational. They are, respectively,
+ * the current, previous, and two prior iterations of it.
+ *
+ * a is current term of the continued fraction.
+ */
+ unsigned long n, d, n0, d0, n1, d1, n2, d2;
n = given_numerator;
d = given_denominator;
n0 = d1 = 0;
n1 = d0 = 1;
+
for (;;) {
- unsigned long t, a;
- if ((n1 > max_numerator) || (d1 > max_denominator)) {
- n1 = n0;
- d1 = d0;
- break;
- }
+ unsigned long dp, a;
+
if (d == 0)
break;
- t = d;
+ /* Find next term in continued fraction, 'a', via
+ * Euclidean algorithm.
+ */
+ dp = d;
a = n / d;
d = n % d;
- n = t;
- t = n0 + a * n1;
+ n = dp;
+
+ /* Calculate the current rational approximation (aka
+ * convergent), n2/d2, using the term just found and
+ * the two prior approximations.
+ */
+ n2 = n0 + a * n1;
+ d2 = d0 + a * d1;
+
+ /* If the current convergent exceeds the maxes, then
+ * return either the previous convergent or the
+ * largest semi-convergent, the final term of which is
+ * found below as 't'.
+ */
+ if ((n2 > max_numerator) || (d2 > max_denominator)) {
+ unsigned long t = min((max_numerator - n0) / n1,
+ (max_denominator - d0) / d1);
+
+ /* This tests if the semi-convergent is closer
+ * than the previous convergent.
+ */
+ if (2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
+ n1 = n0 + t * n1;
+ d1 = d0 + t * d1;
+ }
+ break;
+ }
n0 = n1;
- n1 = t;
- t = d0 + a * d1;
+ n1 = n2;
d0 = d1;
- d1 = t;
+ d1 = d2;
}
*best_numerator = n1;
*best_denominator = d1;
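
The reworked loop is the standard continued-fraction expansion: each term a produces the next convergent n2/d2, and when a convergent exceeds the caller's limits the best semi-convergent is considered before stopping. A userspace transcription for experimentation (same algorithm; a small divide-by-zero guard added for the degenerate first step):

#include <stdio.h>

/* Userspace transcription of the patched rational_best_approximation(). */
static void best_ratio(unsigned long gn, unsigned long gd,
		       unsigned long maxn, unsigned long maxd,
		       unsigned long *bn, unsigned long *bd)
{
	unsigned long n = gn, d = gd, n0 = 0, d1 = 0, n1 = 1, d0 = 1;

	for (;;) {
		unsigned long dp, a, n2, d2;

		if (d == 0)
			break;
		dp = d;				/* Euclidean step */
		a = n / d;
		d = n % d;
		n = dp;
		n2 = n0 + a * n1;		/* next convergent */
		d2 = d0 + a * d1;
		if (n2 > maxn || d2 > maxd) {
			unsigned long t = (maxn - n0) / n1;

			if (d1 && t > (maxd - d0) / d1)
				t = (maxd - d0) / d1;
			/* take the semi-convergent if it is closer */
			if (2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
				n1 = n0 + t * n1;
				d1 = d0 + t * d1;
			}
			break;
		}
		n0 = n1; n1 = n2;
		d0 = d1; d1 = d2;
	}
	*bn = n1;
	*bd = d1;
}

int main(void)
{
	unsigned long n, d;

	/* pi to 9 places, numerator and denominator capped at 1000 */
	best_ratio(3141592653UL, 1000000000UL, 1000, 1000, &n, &d);
	printf("%lu/%lu\n", n, d);		/* 355/113 */
	return 0;
}
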
diff --git a/lib/raid6/unroll.awk b/lib/raid6/unroll.awk
index c6aa03631df8..0809805a7e23 100644
--- a/lib/raid6/unroll.awk
+++ b/lib/raid6/unroll.awk
@@ -13,7 +13,7 @@ BEGIN {
for (i = 0; i < rep; ++i) {
tmp = $0
gsub(/\$\$/, i, tmp)
- gsub(/\$\#/, n, tmp)
+ gsub(/\$#/, n, tmp)
gsub(/\$\*/, "$", tmp)
print tmp
}
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 33feec8989f1..af88d1346dd7 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -650,8 +650,8 @@ void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
if (!sbq_wait->sbq) {
sbq_wait->sbq = sbq;
atomic_inc(&sbq->ws_active);
+ add_wait_queue(&ws->wait, &sbq_wait->wait);
}
- add_wait_queue(&ws->wait, &sbq_wait->wait);
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index dccb95af6003..706020b06617 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -30,13 +30,6 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src,
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
unsigned long res = 0;
- /*
- * Truncate 'max' to the user-specified limit, so that
- * we only have one limit we need to check in the loop
- */
- if (max > count)
- max = count;
-
if (IS_UNALIGNED(src, dst))
goto byte_at_a_time;
@@ -114,6 +107,13 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
unsigned long max = max_addr - src_addr;
long retval;
+ /*
+ * Truncate 'max' to the user-specified limit, so that
+ * we only have one limit we need to check in the loop
+ */
+ if (max > count)
+ max = count;
+
kasan_check_write(dst, count);
check_object_size(dst, count, false);
if (user_access_begin(src, max)) {
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 6c0005d5dd5c..41670d4a5816 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -27,13 +27,6 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
unsigned long c;
/*
- * Truncate 'max' to the user-specified limit, so that
- * we only have one limit we need to check in the loop
- */
- if (max > count)
- max = count;
-
- /*
* Do everything aligned. But that means that we
* need to also expand the maximum..
*/
@@ -109,6 +102,13 @@ long strnlen_user(const char __user *str, long count)
unsigned long max = max_addr - src_addr;
long retval;
+ /*
+ * Truncate 'max' to the user-specified limit, so that
+ * we only have one limit we need to check in the loop
+ */
+ if (max > count)
+ max = count;
+
if (user_access_begin(str, max)) {
retval = do_strnlen_user(str, count, max);
user_access_end();
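
Both user-string helpers get the same change: the clamp of max to the caller's count moves out of the inner loop helpers and in front of user_access_begin(), so the window armed for user access matches the range the loop can actually touch, while the loop itself still checks a single limit. The idea in a kernel-free sketch (hypothetical helper names):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for user_access_begin(): here it just records
 * how many bytes the scan below is allowed to touch. */
static size_t armed_window;

static int access_begin(const char *src, size_t len)
{
	(void)src;
	armed_window = len;		/* validate exactly this range */
	return 1;
}

static long bounded_strnlen(const char *src, size_t count, size_t headroom)
{
	size_t max = headroom, i;

	if (max > count)		/* clamp BEFORE arming the window */
		max = count;
	if (!access_begin(src, max))
		return 0;
	for (i = 0; i < max; i++)	/* one limit checked in the loop */
		if (src[i] == '\0')
			return i + 1;
	return count + 1;		/* no NUL within the clamped window */
}

int main(void)
{
	const char *s = "hello";

	printf("%ld (window %zu)\n", bounded_strnlen(s, 3, 100), armed_window);
	return 0;
}
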
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 51a98f7ee79e..e14a15ac250b 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Test cases for printf facility.
+ * Test cases for bitmap API.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -21,6 +21,39 @@ static unsigned failed_tests __initdata;
static char pbl_buffer[PAGE_SIZE] __initdata;
+static const unsigned long exp1[] __initconst = {
+ BITMAP_FROM_U64(1),
+ BITMAP_FROM_U64(2),
+ BITMAP_FROM_U64(0x0000ffff),
+ BITMAP_FROM_U64(0xffff0000),
+ BITMAP_FROM_U64(0x55555555),
+ BITMAP_FROM_U64(0xaaaaaaaa),
+ BITMAP_FROM_U64(0x11111111),
+ BITMAP_FROM_U64(0x22222222),
+ BITMAP_FROM_U64(0xffffffff),
+ BITMAP_FROM_U64(0xfffffffe),
+ BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0xffffffff77777777ULL),
+ BITMAP_FROM_U64(0),
+};
+
+static const unsigned long exp2[] __initconst = {
+ BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0xffffffff77777777ULL),
+};
+
+/* Fibonacci sequence */
+static const unsigned long exp2_to_exp3_mask[] __initconst = {
+ BITMAP_FROM_U64(0x008000020020212eULL),
+};
+/* exp3_0_1 = (exp2[0] & ~exp2_to_exp3_mask) | (exp2[1] & exp2_to_exp3_mask) */
+static const unsigned long exp3_0_1[] __initconst = {
+ BITMAP_FROM_U64(0x33b3333311313137ULL),
+};
+/* exp3_1_0 = (exp2[1] & ~exp2_to_exp3_mask) | (exp2[0] & exp2_to_exp3_mask) */
+static const unsigned long exp3_1_0[] __initconst = {
+ BITMAP_FROM_U64(0xff7fffff77575751ULL),
+};
static bool __init
__check_eq_uint(const char *srcfile, unsigned int line,
@@ -92,6 +125,36 @@ __check_eq_u32_array(const char *srcfile, unsigned int line,
return true;
}
+static bool __init __check_eq_clump8(const char *srcfile, unsigned int line,
+ const unsigned int offset,
+ const unsigned int size,
+ const unsigned char *const clump_exp,
+ const unsigned long *const clump)
+{
+ unsigned long exp;
+
+ if (offset >= size) {
+ pr_warn("[%s:%u] bit offset for clump out-of-bounds: expected less than %u, got %u\n",
+ srcfile, line, size, offset);
+ return false;
+ }
+
+ exp = clump_exp[offset / 8];
+ if (!exp) {
+ pr_warn("[%s:%u] bit offset for zero clump: expected nonzero clump, got bit offset %u with clump value 0",
+ srcfile, line, offset);
+ return false;
+ }
+
+ if (*clump != exp) {
+ pr_warn("[%s:%u] expected clump value of 0x%lX, got clump value of 0x%lX",
+ srcfile, line, exp, *clump);
+ return false;
+ }
+
+ return true;
+}
+
#define __expect_eq(suffix, ...) \
({ \
int result = 0; \
@@ -108,6 +171,7 @@ __check_eq_u32_array(const char *srcfile, unsigned int line,
#define expect_eq_bitmap(...) __expect_eq(bitmap, ##__VA_ARGS__)
#define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__)
#define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__)
+#define expect_eq_clump8(...) __expect_eq(clump8, ##__VA_ARGS__)
static void __init test_zero_clear(void)
{
@@ -206,6 +270,30 @@ static void __init test_copy(void)
expect_eq_pbl("0-108,128-1023", bmap2, 1024);
}
+#define EXP2_IN_BITS (sizeof(exp2) * 8)
+
+static void __init test_replace(void)
+{
+ unsigned int nbits = 64;
+ DECLARE_BITMAP(bmap, 1024);
+
+ bitmap_zero(bmap, 1024);
+ bitmap_replace(bmap, &exp2[0], &exp2[1], exp2_to_exp3_mask, nbits);
+ expect_eq_bitmap(bmap, exp3_0_1, nbits);
+
+ bitmap_zero(bmap, 1024);
+ bitmap_replace(bmap, &exp2[1], &exp2[0], exp2_to_exp3_mask, nbits);
+ expect_eq_bitmap(bmap, exp3_1_0, nbits);
+
+ bitmap_fill(bmap, 1024);
+ bitmap_replace(bmap, &exp2[0], &exp2[1], exp2_to_exp3_mask, nbits);
+ expect_eq_bitmap(bmap, exp3_0_1, nbits);
+
+ bitmap_fill(bmap, 1024);
+ bitmap_replace(bmap, &exp2[1], &exp2[0], exp2_to_exp3_mask, nbits);
+ expect_eq_bitmap(bmap, exp3_1_0, nbits);
+}
+
#define PARSE_TIME 0x1
struct test_bitmap_parselist{
@@ -216,53 +304,32 @@ struct test_bitmap_parselist{
const int flags;
};
-static const unsigned long exp[] __initconst = {
- BITMAP_FROM_U64(1),
- BITMAP_FROM_U64(2),
- BITMAP_FROM_U64(0x0000ffff),
- BITMAP_FROM_U64(0xffff0000),
- BITMAP_FROM_U64(0x55555555),
- BITMAP_FROM_U64(0xaaaaaaaa),
- BITMAP_FROM_U64(0x11111111),
- BITMAP_FROM_U64(0x22222222),
- BITMAP_FROM_U64(0xffffffff),
- BITMAP_FROM_U64(0xfffffffe),
- BITMAP_FROM_U64(0x3333333311111111ULL),
- BITMAP_FROM_U64(0xffffffff77777777ULL),
- BITMAP_FROM_U64(0),
-};
-
-static const unsigned long exp2[] __initconst = {
- BITMAP_FROM_U64(0x3333333311111111ULL),
- BITMAP_FROM_U64(0xffffffff77777777ULL)
-};
-
static const struct test_bitmap_parselist parselist_tests[] __initconst = {
#define step (sizeof(u64) / sizeof(unsigned long))
- {0, "0", &exp[0], 8, 0},
- {0, "1", &exp[1 * step], 8, 0},
- {0, "0-15", &exp[2 * step], 32, 0},
- {0, "16-31", &exp[3 * step], 32, 0},
- {0, "0-31:1/2", &exp[4 * step], 32, 0},
- {0, "1-31:1/2", &exp[5 * step], 32, 0},
- {0, "0-31:1/4", &exp[6 * step], 32, 0},
- {0, "1-31:1/4", &exp[7 * step], 32, 0},
- {0, "0-31:4/4", &exp[8 * step], 32, 0},
- {0, "1-31:4/4", &exp[9 * step], 32, 0},
- {0, "0-31:1/4,32-63:2/4", &exp[10 * step], 64, 0},
- {0, "0-31:3/4,32-63:4/4", &exp[11 * step], 64, 0},
- {0, " ,, 0-31:3/4 ,, 32-63:4/4 ,, ", &exp[11 * step], 64, 0},
+ {0, "0", &exp1[0], 8, 0},
+ {0, "1", &exp1[1 * step], 8, 0},
+ {0, "0-15", &exp1[2 * step], 32, 0},
+ {0, "16-31", &exp1[3 * step], 32, 0},
+ {0, "0-31:1/2", &exp1[4 * step], 32, 0},
+ {0, "1-31:1/2", &exp1[5 * step], 32, 0},
+ {0, "0-31:1/4", &exp1[6 * step], 32, 0},
+ {0, "1-31:1/4", &exp1[7 * step], 32, 0},
+ {0, "0-31:4/4", &exp1[8 * step], 32, 0},
+ {0, "1-31:4/4", &exp1[9 * step], 32, 0},
+ {0, "0-31:1/4,32-63:2/4", &exp1[10 * step], 64, 0},
+ {0, "0-31:3/4,32-63:4/4", &exp1[11 * step], 64, 0},
+ {0, " ,, 0-31:3/4 ,, 32-63:4/4 ,, ", &exp1[11 * step], 64, 0},
{0, "0-31:1/4,32-63:2/4,64-95:3/4,96-127:4/4", exp2, 128, 0},
{0, "0-2047:128/256", NULL, 2048, PARSE_TIME},
- {0, "", &exp[12 * step], 8, 0},
- {0, "\n", &exp[12 * step], 8, 0},
- {0, ",, ,, , , ,", &exp[12 * step], 8, 0},
- {0, " , ,, , , ", &exp[12 * step], 8, 0},
- {0, " , ,, , , \n", &exp[12 * step], 8, 0},
+ {0, "", &exp1[12 * step], 8, 0},
+ {0, "\n", &exp1[12 * step], 8, 0},
+ {0, ",, ,, , , ,", &exp1[12 * step], 8, 0},
+ {0, " , ,, , , ", &exp1[12 * step], 8, 0},
+ {0, " , ,, , , \n", &exp1[12 * step], 8, 0},
{-EINVAL, "-1", NULL, 8, 0},
{-EINVAL, "-0", NULL, 8, 0},
@@ -280,6 +347,8 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
{-EINVAL, "a-31:10/1", NULL, 8, 0},
{-EINVAL, "0-31:a/1", NULL, 8, 0},
{-EINVAL, "0-\n", NULL, 8, 0},
+
+#undef step
};
static void __init __test_bitmap_parselist(int is_user)
@@ -299,7 +368,7 @@ static void __init __test_bitmap_parselist(int is_user)
set_fs(KERNEL_DS);
time = ktime_get();
- err = bitmap_parselist_user(ptest.in, len,
+ err = bitmap_parselist_user((__force const char __user *)ptest.in, len,
bmap, ptest.nbits);
time = ktime_get() - time;
set_fs(orig_fs);
@@ -326,6 +395,8 @@ static void __init __test_bitmap_parselist(int is_user)
if (ptest.flags & PARSE_TIME)
pr_err("parselist%s: %d: input is '%s' OK, Time: %llu\n",
mode, i, ptest.in, time);
+
+#undef ptest
}
}
@@ -339,20 +410,20 @@ static void __init test_bitmap_parselist_user(void)
__test_bitmap_parselist(1);
}
-#define EXP_BYTES (sizeof(exp) * 8)
+#define EXP1_IN_BITS (sizeof(exp1) * 8)
static void __init test_bitmap_arr32(void)
{
unsigned int nbits, next_bit;
- u32 arr[sizeof(exp) / 4];
- DECLARE_BITMAP(bmap2, EXP_BYTES);
+ u32 arr[EXP1_IN_BITS / 32];
+ DECLARE_BITMAP(bmap2, EXP1_IN_BITS);
memset(arr, 0xa5, sizeof(arr));
- for (nbits = 0; nbits < EXP_BYTES; ++nbits) {
- bitmap_to_arr32(arr, exp, nbits);
+ for (nbits = 0; nbits < EXP1_IN_BITS; ++nbits) {
+ bitmap_to_arr32(arr, exp1, nbits);
bitmap_from_arr32(bmap2, arr, nbits);
- expect_eq_bitmap(bmap2, exp, nbits);
+ expect_eq_bitmap(bmap2, exp1, nbits);
next_bit = find_next_bit(bmap2,
round_up(nbits, BITS_PER_LONG), nbits);
@@ -361,7 +432,7 @@ static void __init test_bitmap_arr32(void)
" tail is not safely cleared: %d\n",
nbits, next_bit);
- if (nbits < EXP_BYTES - 32)
+ if (nbits < EXP1_IN_BITS - 32)
expect_eq_uint(arr[DIV_ROUND_UP(nbits, 32)],
0xa5a5a5a5);
}
@@ -404,15 +475,50 @@ static void noinline __init test_mem_optimisations(void)
}
}
+static const unsigned char clump_exp[] __initconst = {
+ 0x01, /* 1 bit set */
+ 0x02, /* non-edge 1 bit set */
+ 0x00, /* zero bits set */
+ 0x38, /* 3 bits set across 4-bit boundary */
+ 0x38, /* Repeated clump */
+ 0x0F, /* 4 bits set */
+ 0xFF, /* all bits set */
+ 0x05, /* non-adjacent 2 bits set */
+};
+
+static void __init test_for_each_set_clump8(void)
+{
+#define CLUMP_EXP_NUMBITS 64
+ DECLARE_BITMAP(bits, CLUMP_EXP_NUMBITS);
+ unsigned int start;
+ unsigned long clump;
+
+ /* set bitmap to test case */
+ bitmap_zero(bits, CLUMP_EXP_NUMBITS);
+ bitmap_set(bits, 0, 1); /* 0x01 */
+ bitmap_set(bits, 9, 1); /* 0x02 */
+ bitmap_set(bits, 27, 3); /* 0x38 */
+ bitmap_set(bits, 35, 3); /* 0x38 */
+ bitmap_set(bits, 40, 4); /* 0x0F */
+ bitmap_set(bits, 48, 8); /* 0xFF */
+ bitmap_set(bits, 56, 1); /* 0x05 - part 1 */
+ bitmap_set(bits, 58, 1); /* 0x05 - part 2 */
+
+ for_each_set_clump8(start, clump, bits, CLUMP_EXP_NUMBITS)
+ expect_eq_clump8(start, CLUMP_EXP_NUMBITS, clump_exp, &clump);
+}
+
static void __init selftest(void)
{
test_zero_clear();
test_fill_set();
test_copy();
+ test_replace();
test_bitmap_arr32();
test_bitmap_parselist();
test_bitmap_parselist_user();
test_mem_optimisations();
+ test_for_each_set_clump8();
}
KSTM_MODULE_LOADERS(test_bitmap);
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 49cc4d570a40..328d33beae36 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -19,6 +19,7 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/vmalloc.h>
#include <asm/page.h>
@@ -748,6 +749,30 @@ static noinline void __init kmalloc_double_kzfree(void)
kzfree(ptr);
}
+#ifdef CONFIG_KASAN_VMALLOC
+static noinline void __init vmalloc_oob(void)
+{
+ void *area;
+
+ pr_info("vmalloc out-of-bounds\n");
+
+ /*
+ * We have to be careful not to hit the guard page.
+ * The MMU will catch that and crash us.
+ */
+ area = vmalloc(3000);
+ if (!area) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+
+ ((volatile char *)area)[3100];
+ vfree(area);
+}
+#else
+static void __init vmalloc_oob(void) {}
+#endif
+
static int __init kmalloc_tests_init(void)
{
/*
@@ -793,6 +818,7 @@ static int __init kmalloc_tests_init(void)
kasan_strings();
kasan_bitops();
kmalloc_double_kzfree();
+ vmalloc_oob();
kasan_restore_multi_shot(multishot);
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 9742e5cb853a..e4f706a404b3 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -183,6 +183,9 @@ static bool __init check_buf(void *buf, int size, bool want_ctor,
return fail;
}
+#define BULK_SIZE 100
+static void *bulk_array[BULK_SIZE];
+
/*
* Test kmem_cache with given parameters:
* want_ctor - use a constructor;
@@ -203,9 +206,24 @@ static int __init do_kmem_cache_size(size_t size, bool want_ctor,
want_rcu ? SLAB_TYPESAFE_BY_RCU : 0,
want_ctor ? test_ctor : NULL);
for (iter = 0; iter < 10; iter++) {
+ /* Do a test of bulk allocations */
+ if (!want_rcu && !want_ctor) {
+ int ret;
+
+ ret = kmem_cache_alloc_bulk(c, alloc_mask, BULK_SIZE, bulk_array);
+ if (!ret) {
+ fail = true;
+ } else {
+ int i;
+ for (i = 0; i < ret; i++)
+ fail |= check_buf(bulk_array[i], size, want_ctor, want_rcu, want_zero);
+ kmem_cache_free_bulk(c, ret, bulk_array);
+ }
+ }
+
buf = kmem_cache_alloc(c, alloc_mask);
/* Check that buf is zeroed, if it must be. */
- fail = check_buf(buf, size, want_ctor, want_rcu, want_zero);
+ fail |= check_buf(buf, size, want_ctor, want_rcu, want_zero);
fill_with_garbage_skip(buf, size, want_ctor ? CTOR_BYTES : 0);
if (!want_rcu) {
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 7df4f7f395bf..55c14e8c8859 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -2,6 +2,7 @@
/*
* test_xarray.c: Test the XArray API
* Copyright (c) 2017-2018 Microsoft Corporation
+ * Copyright (c) 2019-2020 Oracle
* Author: Matthew Wilcox <willy@infradead.org>
*/
@@ -902,28 +903,34 @@ static noinline void check_store_iter(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
}
-static noinline void check_multi_find(struct xarray *xa)
+static noinline void check_multi_find_1(struct xarray *xa, unsigned order)
{
#ifdef CONFIG_XARRAY_MULTI
+ unsigned long multi = 3 << order;
+ unsigned long next = 4 << order;
unsigned long index;
- xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
- XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);
+ xa_store_order(xa, multi, order, xa_mk_value(multi), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL) != NULL);
+ XA_BUG_ON(xa, xa_store_index(xa, next + 1, GFP_KERNEL) != NULL);
index = 0;
XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
- xa_mk_value(12));
- XA_BUG_ON(xa, index != 12);
- index = 13;
+ xa_mk_value(multi));
+ XA_BUG_ON(xa, index != multi);
+ index = multi + 1;
XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
- xa_mk_value(12));
- XA_BUG_ON(xa, (index < 12) || (index >= 16));
+ xa_mk_value(multi));
+ XA_BUG_ON(xa, (index < multi) || (index >= next));
XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
- xa_mk_value(16));
- XA_BUG_ON(xa, index != 16);
-
- xa_erase_index(xa, 12);
- xa_erase_index(xa, 16);
+ xa_mk_value(next));
+ XA_BUG_ON(xa, index != next);
+ XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL);
+ XA_BUG_ON(xa, index != next);
+
+ xa_erase_index(xa, multi);
+ xa_erase_index(xa, next);
+ xa_erase_index(xa, next + 1);
XA_BUG_ON(xa, !xa_empty(xa));
#endif
}
@@ -1046,12 +1053,33 @@ static noinline void check_find_3(struct xarray *xa)
xa_destroy(xa);
}
+static noinline void check_find_4(struct xarray *xa)
+{
+ unsigned long index = 0;
+ void *entry;
+
+ xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+
+ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
+ XA_BUG_ON(xa, entry != xa_mk_index(ULONG_MAX));
+
+ entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
+ XA_BUG_ON(xa, entry);
+
+ xa_erase_index(xa, ULONG_MAX);
+}
+
static noinline void check_find(struct xarray *xa)
{
+ unsigned i;
+
check_find_1(xa);
check_find_2(xa);
check_find_3(xa);
- check_multi_find(xa);
+ check_find_4(xa);
+
+ for (i = 2; i < 10; i++)
+ check_multi_find_1(xa, i);
check_multi_find_2(xa);
}
@@ -1132,6 +1160,27 @@ static noinline void check_move_tiny(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
}
+static noinline void check_move_max(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+
+ xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
+ rcu_read_unlock();
+
+ xas_set(&xas, 0);
+ rcu_read_lock();
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_index(ULONG_MAX));
+ xas_pause(&xas);
+ XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL);
+ rcu_read_unlock();
+
+ xa_erase_index(xa, ULONG_MAX);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+
static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
XA_STATE(xas, xa, 0);
@@ -1240,6 +1289,7 @@ static noinline void check_move(struct xarray *xa)
xa_destroy(xa);
check_move_tiny(xa);
+ check_move_max(xa);
for (i = 0; i < 16; i++)
check_move_small(xa, 1UL << i);
diff --git a/lib/ubsan.c b/lib/ubsan.c
index fc552d524ef7..7b9b58aee72c 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -140,25 +140,21 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type,
}
}
-static DEFINE_SPINLOCK(report_lock);
-
-static void ubsan_prologue(struct source_location *location,
- unsigned long *flags)
+static void ubsan_prologue(struct source_location *location)
{
current->in_ubsan++;
- spin_lock_irqsave(&report_lock, *flags);
pr_err("========================================"
"========================================\n");
print_source_location("UBSAN: Undefined behaviour in", location);
}
-static void ubsan_epilogue(unsigned long *flags)
+static void ubsan_epilogue(void)
{
dump_stack();
pr_err("========================================"
"========================================\n");
- spin_unlock_irqrestore(&report_lock, *flags);
+
current->in_ubsan--;
}
@@ -167,14 +163,13 @@ static void handle_overflow(struct overflow_data *data, void *lhs,
{
struct type_descriptor *type = data->type;
- unsigned long flags;
char lhs_val_str[VALUE_LENGTH];
char rhs_val_str[VALUE_LENGTH];
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);
val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs);
val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs);
@@ -186,7 +181,7 @@ static void handle_overflow(struct overflow_data *data, void *lhs,
rhs_val_str,
type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
void __ubsan_handle_add_overflow(struct overflow_data *data,
@@ -214,20 +209,19 @@ EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
void __ubsan_handle_negate_overflow(struct overflow_data *data,
void *old_val)
{
- unsigned long flags;
char old_val_str[VALUE_LENGTH];
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);
val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val);
pr_err("negation of %s cannot be represented in type %s:\n",
old_val_str, data->type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
@@ -235,13 +229,12 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
void __ubsan_handle_divrem_overflow(struct overflow_data *data,
void *lhs, void *rhs)
{
- unsigned long flags;
char rhs_val_str[VALUE_LENGTH];
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);
val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs);
@@ -251,58 +244,52 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data,
else
pr_err("division by zero\n");
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_divrem_overflow);
static void handle_null_ptr_deref(struct type_mismatch_data_common *data)
{
- unsigned long flags;
-
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location, &flags);
+ ubsan_prologue(data->location);
pr_err("%s null pointer of type %s\n",
type_check_kinds[data->type_check_kind],
data->type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
static void handle_misaligned_access(struct type_mismatch_data_common *data,
unsigned long ptr)
{
- unsigned long flags;
-
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location, &flags);
+ ubsan_prologue(data->location);
pr_err("%s misaligned address %p for type %s\n",
type_check_kinds[data->type_check_kind],
(void *)ptr, data->type->type_name);
pr_err("which requires %ld byte alignment\n", data->alignment);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
unsigned long ptr)
{
- unsigned long flags;
-
if (suppress_report(data->location))
return;
- ubsan_prologue(data->location, &flags);
+ ubsan_prologue(data->location);
pr_err("%s address %p with insufficient space\n",
type_check_kinds[data->type_check_kind],
(void *) ptr);
pr_err("for an object of type %s\n", data->type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
@@ -351,25 +338,23 @@ EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
{
- unsigned long flags;
char index_str[VALUE_LENGTH];
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);
val_to_string(index_str, sizeof(index_str), data->index_type, index);
pr_err("index %s is out of range for type %s\n", index_str,
data->array_type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
void *lhs, void *rhs)
{
- unsigned long flags;
struct type_descriptor *rhs_type = data->rhs_type;
struct type_descriptor *lhs_type = data->lhs_type;
char rhs_str[VALUE_LENGTH];
@@ -379,7 +364,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
if (suppress_report(&data->location))
goto out;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);
val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs);
val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs);
@@ -402,7 +387,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
lhs_str, rhs_str,
lhs_type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
out:
user_access_restore(ua_flags);
}
@@ -411,11 +396,9 @@ EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
{
- unsigned long flags;
-
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);
pr_err("calling __builtin_unreachable()\n");
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
panic("can't return from __builtin_unreachable()");
}
EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
@@ -423,19 +406,18 @@ EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
void *val)
{
- unsigned long flags;
char val_str[VALUE_LENGTH];
if (suppress_report(&data->location))
return;
- ubsan_prologue(&data->location, &flags);
+ ubsan_prologue(&data->location);
val_to_string(val_str, sizeof(val_str), data->type, val);
pr_err("load of value %s is not a valid value for type %s\n",
val_str, data->type->type_name);
- ubsan_epilogue(&flags);
+ ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index 9ecfd3b547ba..42bd8ab955fa 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -221,6 +221,7 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
return 0;
}
+static __maybe_unused
int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
{
int ret = __cvdso_clock_getres_common(clock, res);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index dee8fc467fcf..7c488a1ce318 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -761,11 +761,38 @@ static int __init initialize_ptr_random(void)
early_initcall(initialize_ptr_random);
/* Maps a pointer to a 32 bit unique identifier. */
+static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
+{
+ unsigned long hashval;
+
+ if (static_branch_unlikely(&not_filled_random_ptr_key))
+ return -EAGAIN;
+
+#ifdef CONFIG_64BIT
+ hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
+ /*
+ * Mask off the first 32 bits, this makes explicit that we have
+ * modified the address (and 32 bits is plenty for a unique ID).
+ */
+ hashval = hashval & 0xffffffff;
+#else
+ hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
+#endif
+ *hashval_out = hashval;
+ return 0;
+}
+
+int ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
+{
+ return __ptr_to_hashval(ptr, hashval_out);
+}
+
static char *ptr_to_id(char *buf, char *end, const void *ptr,
struct printf_spec spec)
{
const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
unsigned long hashval;
+ int ret;
/* When debugging early boot use non-cryptographically secure hash. */
if (unlikely(debug_boot_weak_hash)) {
@@ -773,22 +800,13 @@ static char *ptr_to_id(char *buf, char *end, const void *ptr,
return pointer_string(buf, end, (const void *)hashval, spec);
}
- if (static_branch_unlikely(&not_filled_random_ptr_key)) {
+ ret = __ptr_to_hashval(ptr, &hashval);
+ if (ret) {
spec.field_width = 2 * sizeof(ptr);
/* string length must be less than default_width */
return error_string(buf, end, str, spec);
}
-#ifdef CONFIG_64BIT
- hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
- /*
- * Mask off the first 32 bits, this makes explicit that we have
- * modified the address (and 32 bits is plenty for a unique ID).
- */
- hashval = hashval & 0xffffffff;
-#else
- hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
-#endif
return pointer_string(buf, end, (const void *)hashval, spec);
}
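
Splitting the hashing into __ptr_to_hashval() lets the new ptr_to_hashval() export hand other kernel users the same hashed ID that %p prints. A standalone sketch of the masking step; the mixer below is a stand-in for illustration, not the kernel's keyed siphash:

#include <stdint.h>
#include <stdio.h>

/* Stand-in 64-bit mixer, for illustration only (the kernel uses
 * siphash_1u64() with a boot-time random key). */
static uint64_t mix64(uint64_t x)
{
	x ^= x >> 33; x *= 0xff51afd7ed558ccdULL;
	x ^= x >> 33; x *= 0xc4ceb9fe1a85ec53ULL;
	x ^= x >> 33;
	return x;
}

/* Mirror of the hashing step: keep only the low 32 bits so the
 * printed value is clearly an ID, not a kernel address. */
static unsigned long ptr_to_id_demo(const void *ptr)
{
	return (unsigned long)(mix64((uintptr_t)ptr) & 0xffffffff);
}

int main(void)
{
	int obj;

	printf("hashed id: %08lx\n", ptr_to_id_demo(&obj));
	return 0;
}
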
diff --git a/lib/xarray.c b/lib/xarray.c
index 1237c213f52b..1d9fab7db8da 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* XArray implementation
- * Copyright (c) 2017 Microsoft Corporation
+ * Copyright (c) 2017-2018 Microsoft Corporation
+ * Copyright (c) 2018-2020 Oracle
* Author: Matthew Wilcox <willy@infradead.org>
*/
@@ -967,6 +968,7 @@ void xas_pause(struct xa_state *xas)
if (xas_invalid(xas))
return;
+ xas->xa_node = XAS_RESTART;
if (node) {
unsigned int offset = xas->xa_offset;
while (++offset < XA_CHUNK_SIZE) {
@@ -974,10 +976,11 @@ void xas_pause(struct xa_state *xas)
break;
}
xas->xa_index += (offset - xas->xa_offset) << node->shift;
+ if (xas->xa_index == 0)
+ xas->xa_node = XAS_BOUNDS;
} else {
xas->xa_index++;
}
- xas->xa_node = XAS_RESTART;
}
EXPORT_SYMBOL_GPL(xas_pause);
@@ -1079,13 +1082,15 @@ void *xas_find(struct xa_state *xas, unsigned long max)
{
void *entry;
- if (xas_error(xas))
+ if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
return NULL;
+ if (xas->xa_index > max)
+ return set_bounds(xas);
if (!xas->xa_node) {
xas->xa_index = 1;
return set_bounds(xas);
- } else if (xas_top(xas->xa_node)) {
+ } else if (xas->xa_node == XAS_RESTART) {
entry = xas_load(xas);
if (entry || xas_not_node(xas->xa_node))
return entry;
@@ -1150,6 +1155,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
if (xas_error(xas))
return NULL;
+ if (xas->xa_index > max)
+ goto max;
if (!xas->xa_node) {
xas->xa_index = 1;
@@ -1824,6 +1831,17 @@ void *xa_find(struct xarray *xa, unsigned long *indexp,
}
EXPORT_SYMBOL(xa_find);
+static bool xas_sibling(struct xa_state *xas)
+{
+ struct xa_node *node = xas->xa_node;
+ unsigned long mask;
+
+ if (!node)
+ return false;
+ mask = (XA_CHUNK_SIZE << node->shift) - 1;
+ return (xas->xa_index & mask) > (xas->xa_offset << node->shift);
+}
+
/**
* xa_find_after() - Search the XArray for a present entry.
* @xa: XArray.
@@ -1847,21 +1865,20 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp,
XA_STATE(xas, xa, *indexp + 1);
void *entry;
+ if (xas.xa_index == 0)
+ return NULL;
+
rcu_read_lock();
for (;;) {
if ((__force unsigned int)filter < XA_MAX_MARKS)
entry = xas_find_marked(&xas, max, filter);
else
entry = xas_find(&xas, max);
- if (xas.xa_node == XAS_BOUNDS)
+
+ if (xas_invalid(&xas))
break;
- if (xas.xa_shift) {
- if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
- continue;
- } else {
- if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
- continue;
- }
+ if (xas_sibling(&xas))
+ continue;
if (!xas_retry(&xas, entry))
break;
}
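
The new xas_sibling() helper condenses the old xa_shift/xa_offset special cases in xa_find_after() into one comparison: after a find lands inside a multi-order entry, xa_offset still names the entry's head slot while xa_index can point anywhere in the entry's range, and comparing the two in index units detects that case. A worked numeric example (hypothetical values):

#include <stdio.h>

#define XA_CHUNK_SIZE 64UL

/* Same arithmetic as xas_sibling(): node at 'shift', walk stopped on
 * slot 'offset', with the user-visible position 'index'. */
static int is_sibling(unsigned int shift, unsigned long offset,
		      unsigned long index)
{
	unsigned long mask = (XA_CHUNK_SIZE << shift) - 1;

	return (index & mask) > (offset << shift);
}

int main(void)
{
	/* Leaf node (shift 0): an order-2 entry occupies slots 12..15.
	 * The walk stops on head slot 12; index 14 lies inside the entry,
	 * so xa_find_after() must keep searching past it. */
	printf("index 12: sibling=%d\n", is_sibling(0, 12, 12));	/* 0 */
	printf("index 14: sibling=%d\n", is_sibling(0, 12, 14));	/* 1 */
	return 0;
}
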