path: root/lib
Diffstat:
-rw-r--r--  lib/Kconfig                 |   7
-rw-r--r--  lib/Kconfig.debug           |  67
-rw-r--r--  lib/Kconfig.kgdb            |  13
-rw-r--r--  lib/Makefile                |  12
-rw-r--r--  lib/bcd.c                   |  14
-rw-r--r--  lib/bitmap.c                |  16
-rw-r--r--  lib/bitrev.c                |   3
-rw-r--r--  lib/bug.c                   |   2
-rw-r--r--  lib/cmdline.c               |  16
-rw-r--r--  lib/cpumask.c               |   9
-rw-r--r--  lib/crc-t10dif.c            |  67
-rw-r--r--  lib/debugobjects.c          |  34
-rw-r--r--  lib/devres.c                |   2
-rw-r--r--  lib/div64.c                 |  10
-rw-r--r--  lib/hexdump.c               |   7
-rw-r--r--  lib/idr.c                   | 142
-rw-r--r--  lib/inflate.c               |  52
-rw-r--r--  lib/iomap.c                 |   3
-rw-r--r--  lib/iommu-helper.c          |   8
-rw-r--r--  lib/kernel_lock.c           | 120
-rw-r--r--  lib/kobject.c               |  20
-rw-r--r--  lib/kobject_uevent.c        |   9
-rw-r--r--  lib/list_debug.c            |  50
-rw-r--r--  lib/lmb.c                   |  46
-rw-r--r--  lib/lzo/lzo1x_decompress.c  |   6
-rw-r--r--  lib/parser.c                |  32
-rw-r--r--  lib/percpu_counter.c        |   7
-rw-r--r--  lib/plist.c                 |  13
-rw-r--r--  lib/radix-tree.c            | 302
-rw-r--r--  lib/ratelimit.c             |  56
-rw-r--r--  lib/scatterlist.c           | 176
-rw-r--r--  lib/show_mem.c              |  63
-rw-r--r--  lib/smp_processor_id.c      |  11
-rw-r--r--  lib/swiotlb.c               |   4
-rw-r--r--  lib/syscall.c               |  75
-rw-r--r--  lib/textsearch.c            |  17
-rw-r--r--  lib/ts_bm.c                 |  28
-rw-r--r--  lib/ts_fsm.c                |   6
-rw-r--r--  lib/ts_kmp.c                |  29
-rw-r--r--  lib/vsprintf.c              | 128
40 files changed, 1208 insertions, 474 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 8cc8e8722a3f..c7ad7a5b3535 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -29,6 +29,13 @@ config CRC16
the kernel tree does. Such modules that use library CRC16
functions require M here.
+config CRC_T10DIF
+ tristate "CRC calculation for the T10 Data Integrity Field"
+ help
+ This option is only needed if a module that's not in the
+ kernel tree needs to calculate CRC checks for use with the
+ SCSI data integrity subsystem.
+
config CRC_ITU_T
tristate "CRC ITU-T V.41 functions"
help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d2099f41aa1e..e1d4764435ed 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -74,6 +74,9 @@ config DEBUG_FS
debugging files into. Enable this option to be able to read and
write to these files.
+ For detailed documentation on the debugfs API, see
+ Documentation/DocBook/filesystems.
+
If unsure, say N.
config HEADERS_CHECK
@@ -147,7 +150,7 @@ config DETECT_SOFTLOCKUP
help
Say Y here to enable the kernel to detect "soft lockups",
which are bugs that cause the kernel to loop in kernel
- mode for more than 10 seconds, without giving other tasks a
+ mode for more than 60 seconds, without giving other tasks a
chance to run.
When a soft-lockup is detected, the kernel will print the
@@ -159,6 +162,30 @@ config DETECT_SOFTLOCKUP
can be detected via the NMI-watchdog, on platforms that
support it.)
+config BOOTPARAM_SOFTLOCKUP_PANIC
+ bool "Panic (Reboot) On Soft Lockups"
+ depends on DETECT_SOFTLOCKUP
+ help
+ Say Y here to enable the kernel to panic on "soft lockups",
+ which are bugs that cause the kernel to loop in kernel
+ mode for more than 60 seconds, without giving other tasks a
+ chance to run.
+
+ The panic can be used in combination with panic_timeout,
+ to cause the system to reboot automatically after a
+ lockup has been detected. This feature is useful for
+ high-availability systems that have uptime guarantees and
+ where a lockup must be resolved ASAP.
+
+ Say N if unsure.
+
+config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
+ int
+ depends on DETECT_SOFTLOCKUP
+ range 0 1
+ default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
+ default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
+
config SCHED_DEBUG
bool "Collect scheduler debugging info"
depends on DEBUG_KERNEL && PROC_FS
@@ -419,7 +446,6 @@ config DEBUG_LOCKING_API_SELFTESTS
config STACKTRACE
bool
- depends on DEBUG_KERNEL
depends on STACKTRACE_SUPPORT
config DEBUG_KOBJECT
@@ -479,6 +505,18 @@ config DEBUG_WRITECOUNT
If unsure, say N.
+config DEBUG_MEMORY_INIT
+ bool "Debug memory initialisation" if EMBEDDED
+ default !EMBEDDED
+ help
+ Enable this for additional checks during memory initialisation.
+ The sanity checks verify aspects of the VM such as the memory model
+ and other information provided by the architecture. Verbose
+ information will be printed at KERN_DEBUG loglevel depending
+ on the mminit_loglevel= command-line option.
+
+ If unsure, say Y
+
config DEBUG_LIST
bool "Debug linked list manipulation"
depends on DEBUG_KERNEL
@@ -531,16 +569,34 @@ config BOOT_PRINTK_DELAY
config RCU_TORTURE_TEST
tristate "torture tests for RCU"
depends on DEBUG_KERNEL
- depends on m
default n
help
This option provides a kernel module that runs torture tests
on the RCU infrastructure. The kernel module may be built
after the fact on the running kernel to be tested, if desired.
+ Say Y here if you want RCU torture tests to be built into
+ the kernel.
Say M if you want the RCU torture tests to build as a module.
Say N if you are unsure.
+config RCU_TORTURE_TEST_RUNNABLE
+ bool "torture tests for RCU runnable by default"
+ depends on RCU_TORTURE_TEST = y
+ default n
+ help
+ This option provides a way to build the RCU torture tests
+ directly into the kernel without them starting up at boot
+ time. You can use /proc/sys/kernel/rcutorture_runnable
+ to manually override this setting. This /proc file is
+ available only when the RCU torture tests have been built
+ into the kernel.
+
+ Say Y here if you want the RCU torture tests to start during
+ boot (you probably don't).
+ Say N here if you want the RCU torture tests to start only
+ after being manually enabled via /proc.
+
config KPROBES_SANITY_TEST
bool "Kprobes sanity tests"
depends on DEBUG_KERNEL
@@ -563,6 +619,9 @@ config BACKTRACE_SELF_TEST
for distributions or general kernels, but only for kernel
developers working on architecture code.
+ Note that if you want to also test saved backtraces, you will
+ have to enable STACKTRACE as well.
+
Say N if you are unsure.
config LKDTM
@@ -634,6 +693,8 @@ config LATENCYTOP
Enable this option if you want to use the LatencyTOP tool
to find out which userspace is blocking on what kernel operations.
+source kernel/trace/Kconfig
+
config PROVIDE_OHCI1394_DMA_INIT
bool "Remote debugging over FireWire early on boot"
depends on PCI && X86
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index f2e01ac5ab09..2cfd2721f7ed 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,4 +1,7 @@
+config HAVE_ARCH_KGDB
+ bool
+
menuconfig KGDB
bool "KGDB: kernel debugging with remote gdb"
select FRAME_POINTER
@@ -10,15 +13,10 @@ menuconfig KGDB
at http://kgdb.sourceforge.net as well as in DocBook form
in Documentation/DocBook/. If unsure, say N.
-config HAVE_ARCH_KGDB_SHADOW_INFO
- bool
-
-config HAVE_ARCH_KGDB
- bool
+if KGDB
config KGDB_SERIAL_CONSOLE
tristate "KGDB: use kgdb over the serial console"
- depends on KGDB
select CONSOLE_POLL
select MAGIC_SYSRQ
default y
@@ -28,7 +26,6 @@ config KGDB_SERIAL_CONSOLE
config KGDB_TESTS
bool "KGDB: internal test suite"
- depends on KGDB
default n
help
This is a kgdb I/O module specifically designed to test
@@ -56,3 +53,5 @@ config KGDB_TESTS_BOOT_STRING
boot. See the drivers/misc/kgdbts.c for detailed
information about other strings you could use beyond the
default of V1F100.
+
+endif # KGDB
diff --git a/lib/Makefile b/lib/Makefile
index 74b0cfb1fcc3..3b1f94bbe9de 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -2,18 +2,23 @@
# Makefile for some libs needed in the kernel.
#
+ifdef CONFIG_FTRACE
+ORIG_CFLAGS := $(KBUILD_CFLAGS)
+KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
+endif
+
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o \
idr.o int_sqrt.o extable.o prio_tree.o \
sha1.o irq_regs.o reciprocal_div.o argv_split.o \
- proportions.o prio_heap.o ratelimit.o
+ proportions.o prio_heap.o ratelimit.o show_mem.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o kref.o klist.o
-obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
+obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
@@ -45,6 +50,7 @@ endif
obj-$(CONFIG_BITREVERSE) += bitrev.o
obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
obj-$(CONFIG_CRC16) += crc16.o
+obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_CRC7) += crc7.o
@@ -72,6 +78,8 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o
obj-$(CONFIG_HAVE_LMB) += lmb.o
+obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
+
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/bcd.c b/lib/bcd.c
new file mode 100644
index 000000000000..d74257fd0fe7
--- /dev/null
+++ b/lib/bcd.c
@@ -0,0 +1,14 @@
+#include <linux/bcd.h>
+#include <linux/module.h>
+
+unsigned bcd2bin(unsigned char val)
+{
+ return (val & 0x0f) + (val >> 4) * 10;
+}
+EXPORT_SYMBOL(bcd2bin);
+
+unsigned char bin2bcd(unsigned val)
+{
+ return ((val / 10) << 4) + val % 10;
+}
+EXPORT_SYMBOL(bin2bcd);
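
For illustration, a minimal usage sketch of the new helpers; the register value is hypothetical (e.g. a BCD seconds register read from an RTC):

    #include <linux/bcd.h>

    static unsigned int example_decode_seconds(void)
    {
            unsigned char raw = 0x59;       /* BCD for decimal 59 */

            /* bcd2bin: (0x59 & 0x0f) + (0x59 >> 4) * 10 == 9 + 50 == 59 */
            return bcd2bin(raw);
    }

    /* The inverse: bin2bcd(59) == ((59 / 10) << 4) + 59 % 10 == 0x59 */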
diff --git a/lib/bitmap.c b/lib/bitmap.c
index c4cb48f77f0c..482df94ea21e 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,22 +316,6 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
EXPORT_SYMBOL(bitmap_scnprintf);
/**
- * bitmap_scnprintf_len - return buffer length needed to convert
- * bitmap to an ASCII hex string.
- * @len: number of bits to be converted
- */
-int bitmap_scnprintf_len(unsigned int len)
-{
- /* we need 9 chars per word for 32 bit words (8 hexdigits + sep/null) */
- int bitslen = ALIGN(len, CHUNKSZ);
- int wordlen = CHUNKSZ / 4;
- int buflen = (bitslen / wordlen) * (wordlen + 1) * sizeof(char);
-
- return buflen;
-}
-EXPORT_SYMBOL(bitmap_scnprintf_len);
-
-/**
* __bitmap_parse - convert an ASCII hex string into a bitmap.
* @buf: pointer to buffer containing string.
* @buflen: buffer size in bytes. If string is smaller than this
diff --git a/lib/bitrev.c b/lib/bitrev.c
index 989aff73f881..3956203456d4 100644
--- a/lib/bitrev.c
+++ b/lib/bitrev.c
@@ -42,10 +42,11 @@ const u8 byte_rev_table[256] = {
};
EXPORT_SYMBOL_GPL(byte_rev_table);
-static __always_inline u16 bitrev16(u16 x)
+u16 bitrev16(u16 x)
{
return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8);
}
+EXPORT_SYMBOL(bitrev16);
/**
* bitrev32 - reverse the order of bits in a u32 value
diff --git a/lib/bug.c b/lib/bug.c
index 530f38f55787..bfeafd60ee9f 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -37,6 +37,7 @@
*/
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/sched.h>
@@ -149,6 +150,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
(void *)bugaddr);
show_regs(regs);
+ add_taint(TAINT_WARN);
return BUG_TRAP_TYPE_WARN;
}
diff --git a/lib/cmdline.c b/lib/cmdline.c
index f596c08d213a..5ba8a942a478 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -116,7 +116,7 @@ char *get_options(const char *str, int nints, int *ints)
/**
* memparse - parse a string with mem suffixes into a number
* @ptr: Where parse begins
- * @retptr: (output) Pointer to next char after parse completes
+ * @retptr: (output) Optional pointer to next char after parse completes
*
* Parses a string into a number. The number stored at @ptr is
* potentially suffixed with %K (for kilobytes, or 1024 bytes),
@@ -126,11 +126,13 @@ char *get_options(const char *str, int nints, int *ints)
* megabyte, or one gigabyte, respectively.
*/
-unsigned long long memparse (char *ptr, char **retptr)
+unsigned long long memparse(char *ptr, char **retptr)
{
- unsigned long long ret = simple_strtoull (ptr, retptr, 0);
+ char *endptr; /* local pointer to end of parsed string */
- switch (**retptr) {
+ unsigned long long ret = simple_strtoull(ptr, &endptr, 0);
+
+ switch (*endptr) {
case 'G':
case 'g':
ret <<= 10;
@@ -140,10 +142,14 @@ unsigned long long memparse (char *ptr, char **retptr)
case 'K':
case 'k':
ret <<= 10;
- (*retptr)++;
+ endptr++;
default:
break;
}
+
+ if (retptr)
+ *retptr = endptr;
+
return ret;
}
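
With the NULL-tolerant @retptr above, a caller that only wants the value can now pass NULL. A sketch of the resulting usage (the suffix arithmetic follows from the fall-through shifts in the switch):

    static unsigned long long example_parse_size(char *arg)
    {
            /* "64K" -> 64 << 10, "16M" -> 16 << 20, "1G" -> 1 << 30;
             * a NULL retptr is now allowed when the caller does not
             * care where parsing stopped. */
            return memparse(arg, NULL);
    }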
diff --git a/lib/cpumask.c b/lib/cpumask.c
index bb4f76d3c3e7..5f97dc25ef9c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp)
}
EXPORT_SYMBOL(__next_cpu);
+#if NR_CPUS > 64
+int __next_cpu_nr(int n, const cpumask_t *srcp)
+{
+ return min_t(int, nr_cpu_ids,
+ find_next_bit(srcp->bits, nr_cpu_ids, n+1));
+}
+EXPORT_SYMBOL(__next_cpu_nr);
+#endif
+
int __any_online_cpu(const cpumask_t *mask)
{
int cpu;
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
new file mode 100644
index 000000000000..fbbd66ed86cd
--- /dev/null
+++ b/lib/crc-t10dif.c
@@ -0,0 +1,67 @@
+/*
+ * T10 Data Integrity Field CRC16 calculation
+ *
+ * Copyright (c) 2007 Oracle Corporation. All rights reserved.
+ * Written by Martin K. Petersen <martin.petersen@oracle.com>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/crc-t10dif.h>
+
+/* Table generated using the following polynomial:
+ * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
+ * gt: 0x8bb7
+ */
+static const __u16 t10_dif_crc_table[256] = {
+ 0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
+ 0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
+ 0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
+ 0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
+ 0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
+ 0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
+ 0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
+ 0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
+ 0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
+ 0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
+ 0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
+ 0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
+ 0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
+ 0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
+ 0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
+ 0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
+ 0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
+ 0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
+ 0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
+ 0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
+ 0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
+ 0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
+ 0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
+ 0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
+ 0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
+ 0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
+ 0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
+ 0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
+ 0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
+ 0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
+ 0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
+ 0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
+};
+
+__u16 crc_t10dif(const unsigned char *buffer, size_t len)
+{
+ __u16 crc = 0;
+ unsigned int i;
+
+ for (i = 0 ; i < len ; i++)
+ crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];
+
+ return crc;
+}
+EXPORT_SYMBOL(crc_t10dif);
+
+MODULE_DESCRIPTION("T10 DIF CRC calculation");
+MODULE_LICENSE("GPL");
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a76a5e122ae1..45a6bde762d1 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -68,6 +68,7 @@ static int fill_pool(void)
{
gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
struct debug_obj *new;
+ unsigned long flags;
if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
return obj_pool_free;
@@ -81,10 +82,10 @@ static int fill_pool(void)
if (!new)
return obj_pool_free;
- spin_lock(&pool_lock);
+ spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&new->node, &obj_pool);
obj_pool_free++;
- spin_unlock(&pool_lock);
+ spin_unlock_irqrestore(&pool_lock, flags);
}
return obj_pool_free;
}
@@ -110,16 +111,13 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
}
/*
- * Allocate a new object. If the pool is empty and no refill possible,
- * switch off the debugger.
+ * Allocate a new object. If the pool is empty, switch off the debugger.
*/
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
struct debug_obj *obj = NULL;
- int retry = 0;
-repeat:
spin_lock(&pool_lock);
if (obj_pool.first) {
obj = hlist_entry(obj_pool.first, typeof(*obj), node);
@@ -141,9 +139,6 @@ repeat:
}
spin_unlock(&pool_lock);
- if (fill_pool() && !obj && !retry++)
- goto repeat;
-
return obj;
}
@@ -210,9 +205,8 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
if (limit < 5 && obj->descr != descr_test) {
limit++;
- printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
+ WARN(1, KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
obj_states[obj->state], obj->descr->name);
- WARN_ON(1);
}
debug_objects_warnings++;
}
@@ -231,15 +225,13 @@ debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
static void debug_object_is_on_stack(void *addr, int onstack)
{
- void *stack = current->stack;
int is_on_stack;
static int limit;
if (limit > 4)
return;
- is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE));
-
+ is_on_stack = object_is_on_stack(addr);
if (is_on_stack == onstack)
return;
@@ -261,6 +253,8 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
struct debug_obj *obj;
unsigned long flags;
+ fill_pool();
+
db = get_bucket((unsigned long) addr);
spin_lock_irqsave(&db->lock, flags);
@@ -738,26 +732,22 @@ check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
obj = lookup_object(addr, db);
if (!obj && state != ODEBUG_STATE_NONE) {
- printk(KERN_ERR "ODEBUG: selftest object not found\n");
- WARN_ON(1);
+ WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
goto out;
}
if (obj && obj->state != state) {
- printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
+ WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
obj->state, state);
- WARN_ON(1);
goto out;
}
if (fixups != debug_objects_fixups) {
- printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
+ WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
fixups, debug_objects_fixups);
- WARN_ON(1);
goto out;
}
if (warnings != debug_objects_warnings) {
- printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
+ WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
warnings, debug_objects_warnings);
- WARN_ON(1);
goto out;
}
res = 0;
diff --git a/lib/devres.c b/lib/devres.c
index 26c87c49d776..72c8909006da 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -2,7 +2,7 @@
#include <linux/io.h>
#include <linux/module.h>
-static void devm_ioremap_release(struct device *dev, void *res)
+void devm_ioremap_release(struct device *dev, void *res)
{
iounmap(*(void __iomem **)res);
}
diff --git a/lib/div64.c b/lib/div64.c
index bb5bd0c0f030..a111eb8de9cf 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -98,3 +98,13 @@ EXPORT_SYMBOL(div64_u64);
#endif
#endif /* BITS_PER_LONG == 32 */
+
+/*
+ * Iterative div/mod for use when dividend is not expected to be much
+ * bigger than divisor.
+ */
+u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+ return __iter_div_u64_rem(dividend, divisor, remainder);
+}
+EXPORT_SYMBOL(iter_div_u64_rem);
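
The wrapped helper (__iter_div_u64_rem in <linux/math64.h>) divides by repeated subtraction, roughly as sketched below; this beats a full 64-bit division on 32-bit machines when the quotient is known to be small:

    #include <linux/types.h>

    static u32 iter_div_sketch(u64 dividend, u32 divisor, u64 *remainder)
    {
            u32 quot = 0;

            while (dividend >= divisor) {
                    dividend -= divisor;
                    quot++;
            }
            *remainder = dividend;
            return quot;
    }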
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 343546550dc9..f07c0db81d26 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -12,6 +12,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
+const char hex_asc[] = "0123456789abcdef";
+EXPORT_SYMBOL(hex_asc);
+
/**
* hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
* @buf: data blob to dump
@@ -93,8 +96,8 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
for (j = 0; (j < rowsize) && (j < len) && (lx + 4) < linebuflen;
j++) {
ch = ptr[j];
- linebuf[lx++] = hex_asc(ch >> 4);
- linebuf[lx++] = hex_asc(ch & 0x0f);
+ linebuf[lx++] = hex_asc_hi(ch);
+ linebuf[lx++] = hex_asc_lo(ch);
linebuf[lx++] = ' ';
}
ascii_column = 3 * rowsize + 2;
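
The new hex_asc_hi()/hex_asc_lo() helpers index the exported table by nibble; their definitions in <linux/kernel.h> are, to the best of my reading, equivalent to:

    #define hex_asc_lo(x)   hex_asc[((x) & 0x0f)]
    #define hex_asc_hi(x)   hex_asc[((x) & 0xf0) >> 4]

    /* e.g. ch == 0xa5: hex_asc_hi(ch) == 'a', hex_asc_lo(ch) == '5' */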
diff --git a/lib/idr.c b/lib/idr.c
index 7a02e173f027..e728c7fccc4d 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,6 +6,8 @@
* Modified by George Anzinger to reuse immediately and to use
* find bit instructions. Also removed _irq on spinlocks.
*
+ * Modified by Nadia Derbey to make it RCU safe.
+ *
* Small id to pointer translation service.
*
* It uses a radix tree like structure as a sparse array indexed
@@ -35,7 +37,7 @@
static struct kmem_cache *idr_layer_cache;
-static struct idr_layer *alloc_layer(struct idr *idp)
+static struct idr_layer *get_from_free_list(struct idr *idp)
{
struct idr_layer *p;
unsigned long flags;
@@ -50,15 +52,28 @@ static struct idr_layer *alloc_layer(struct idr *idp)
return(p);
}
+static void idr_layer_rcu_free(struct rcu_head *head)
+{
+ struct idr_layer *layer;
+
+ layer = container_of(head, struct idr_layer, rcu_head);
+ kmem_cache_free(idr_layer_cache, layer);
+}
+
+static inline void free_layer(struct idr_layer *p)
+{
+ call_rcu(&p->rcu_head, idr_layer_rcu_free);
+}
+
/* only called when idp->lock is held */
-static void __free_layer(struct idr *idp, struct idr_layer *p)
+static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
p->ary[0] = idp->id_free;
idp->id_free = p;
idp->id_free_cnt++;
}
-static void free_layer(struct idr *idp, struct idr_layer *p)
+static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
unsigned long flags;
@@ -66,7 +81,7 @@ static void free_layer(struct idr *idp, struct idr_layer *p)
* Depends on the return element being zeroed.
*/
spin_lock_irqsave(&idp->lock, flags);
- __free_layer(idp, p);
+ __move_to_free_list(idp, p);
spin_unlock_irqrestore(&idp->lock, flags);
}
@@ -96,7 +111,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
* @gfp_mask: memory allocation flags
*
* This function should be called prior to locking and calling the
- * following function. It preallocates enough memory to satisfy
+ * idr_get_new* functions. It preallocates enough memory to satisfy
* the worst possible allocation.
*
* If the system is REALLY out of memory this function returns 0,
@@ -109,7 +124,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
if (new == NULL)
return (0);
- free_layer(idp, new);
+ move_to_free_list(idp, new);
}
return 1;
}
@@ -143,7 +158,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
/* if already at the top layer, we need to grow */
if (!(p = pa[l])) {
*starting_id = id;
- return -2;
+ return IDR_NEED_TO_GROW;
}
/* If we need to go up one layer, continue the
@@ -160,16 +175,17 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
id = ((id >> sh) ^ n ^ m) << sh;
}
if ((id >= MAX_ID_BIT) || (id < 0))
- return -3;
+ return IDR_NOMORE_SPACE;
if (l == 0)
break;
/*
* Create the layer below if it is missing.
*/
if (!p->ary[m]) {
- if (!(new = alloc_layer(idp)))
+ new = get_from_free_list(idp);
+ if (!new)
return -1;
- p->ary[m] = new;
+ rcu_assign_pointer(p->ary[m], new);
p->count++;
}
pa[l--] = p;
@@ -192,7 +208,7 @@ build_up:
p = idp->top;
layers = idp->layers;
if (unlikely(!p)) {
- if (!(p = alloc_layer(idp)))
+ if (!(p = get_from_free_list(idp)))
return -1;
layers = 1;
}
@@ -204,7 +220,7 @@ build_up:
layers++;
if (!p->count)
continue;
- if (!(new = alloc_layer(idp))) {
+ if (!(new = get_from_free_list(idp))) {
/*
* The allocation failed. If we built part of
* the structure tear it down.
@@ -214,7 +230,7 @@ build_up:
p = p->ary[0];
new->ary[0] = NULL;
new->bitmap = new->count = 0;
- __free_layer(idp, new);
+ __move_to_free_list(idp, new);
}
spin_unlock_irqrestore(&idp->lock, flags);
return -1;
@@ -225,10 +241,10 @@ build_up:
__set_bit(0, &new->bitmap);
p = new;
}
- idp->top = p;
+ rcu_assign_pointer(idp->top, p);
idp->layers = layers;
v = sub_alloc(idp, &id, pa);
- if (v == -2)
+ if (v == IDR_NEED_TO_GROW)
goto build_up;
return(v);
}
@@ -244,7 +260,8 @@ static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
* Successfully found an empty slot. Install the user
* pointer and mark the slot full.
*/
- pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
+ rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
+ (struct idr_layer *)ptr);
pa[0]->count++;
idr_mark_full(pa, id);
}
@@ -277,12 +294,8 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
* This is a cheap hack until the IDR code can be fixed to
* return proper error values.
*/
- if (rv < 0) {
- if (rv == -1)
- return -EAGAIN;
- else /* Will be -3 */
- return -ENOSPC;
- }
+ if (rv < 0)
+ return _idr_rc_to_errno(rv);
*id = rv;
return 0;
}
@@ -312,12 +325,8 @@ int idr_get_new(struct idr *idp, void *ptr, int *id)
* This is a cheap hack until the IDR code can be fixed to
* return proper error values.
*/
- if (rv < 0) {
- if (rv == -1)
- return -EAGAIN;
- else /* Will be -3 */
- return -ENOSPC;
- }
+ if (rv < 0)
+ return _idr_rc_to_errno(rv);
*id = rv;
return 0;
}
@@ -325,7 +334,8 @@ EXPORT_SYMBOL(idr_get_new);
static void idr_remove_warning(int id)
{
- printk("idr_remove called for id=%d which is not allocated.\n", id);
+ printk(KERN_WARNING
+ "idr_remove called for id=%d which is not allocated.\n", id);
dump_stack();
}
@@ -334,6 +344,7 @@ static void sub_remove(struct idr *idp, int shift, int id)
struct idr_layer *p = idp->top;
struct idr_layer **pa[MAX_LEVEL];
struct idr_layer ***paa = &pa[0];
+ struct idr_layer *to_free;
int n;
*paa = NULL;
@@ -349,13 +360,18 @@ static void sub_remove(struct idr *idp, int shift, int id)
n = id & IDR_MASK;
if (likely(p != NULL && test_bit(n, &p->bitmap))){
__clear_bit(n, &p->bitmap);
- p->ary[n] = NULL;
+ rcu_assign_pointer(p->ary[n], NULL);
+ to_free = NULL;
while(*paa && ! --((**paa)->count)){
- free_layer(idp, **paa);
+ if (to_free)
+ free_layer(to_free);
+ to_free = **paa;
**paa-- = NULL;
}
if (!*paa)
idp->layers = 0;
+ if (to_free)
+ free_layer(to_free);
} else
idr_remove_warning(id);
}
@@ -368,22 +384,34 @@ static void sub_remove(struct idr *idp, int shift, int id)
void idr_remove(struct idr *idp, int id)
{
struct idr_layer *p;
+ struct idr_layer *to_free;
/* Mask off upper bits we don't use for the search. */
id &= MAX_ID_MASK;
sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
- idp->top->ary[0]) { // We can drop a layer
-
+ idp->top->ary[0]) {
+ /*
+ * Single child at leftmost slot: we can shrink the tree.
+ * This level is not needed anymore since when layers are
+ * inserted, they are inserted at the top of the existing
+ * tree.
+ */
+ to_free = idp->top;
p = idp->top->ary[0];
- idp->top->bitmap = idp->top->count = 0;
- free_layer(idp, idp->top);
- idp->top = p;
+ rcu_assign_pointer(idp->top, p);
--idp->layers;
+ to_free->bitmap = to_free->count = 0;
+ free_layer(to_free);
}
while (idp->id_free_cnt >= IDR_FREE_MAX) {
- p = alloc_layer(idp);
+ p = get_from_free_list(idp);
+ /*
+ * Note: we don't call the rcu callback here, since the only
+ * layers that fall into the freelist are those that have been
+ * preallocated.
+ */
kmem_cache_free(idr_layer_cache, p);
}
return;
@@ -424,15 +452,13 @@ void idr_remove_all(struct idr *idp)
id += 1 << n;
while (n < fls(id)) {
- if (p) {
- memset(p, 0, sizeof *p);
- free_layer(idp, p);
- }
+ if (p)
+ free_layer(p);
n += IDR_BITS;
p = *--paa;
}
}
- idp->top = NULL;
+ rcu_assign_pointer(idp->top, NULL);
idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
@@ -444,7 +470,7 @@ EXPORT_SYMBOL(idr_remove_all);
void idr_destroy(struct idr *idp)
{
while (idp->id_free_cnt) {
- struct idr_layer *p = alloc_layer(idp);
+ struct idr_layer *p = get_from_free_list(idp);
kmem_cache_free(idr_layer_cache, p);
}
}
@@ -459,7 +485,8 @@ EXPORT_SYMBOL(idr_destroy);
* return indicates that @id is not valid or you passed %NULL in
* idr_get_new().
*
- * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
+ * This function can be called under rcu_read_lock(), given that the leaf
+ * pointers' lifetimes are correctly managed.
*/
void *idr_find(struct idr *idp, int id)
{
@@ -467,7 +494,7 @@ void *idr_find(struct idr *idp, int id)
struct idr_layer *p;
n = idp->layers * IDR_BITS;
- p = idp->top;
+ p = rcu_dereference(idp->top);
/* Mask off upper bits we don't use for the search. */
id &= MAX_ID_MASK;
@@ -477,7 +504,7 @@ void *idr_find(struct idr *idp, int id)
while (n > 0 && p) {
n -= IDR_BITS;
- p = p->ary[(id >> n) & IDR_MASK];
+ p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
}
return((void *)p);
}
@@ -510,7 +537,7 @@ int idr_for_each(struct idr *idp,
struct idr_layer **paa = &pa[0];
n = idp->layers * IDR_BITS;
- p = idp->top;
+ p = rcu_dereference(idp->top);
max = 1 << n;
id = 0;
@@ -518,7 +545,7 @@ int idr_for_each(struct idr *idp,
while (n > 0 && p) {
n -= IDR_BITS;
*paa++ = p;
- p = p->ary[(id >> n) & IDR_MASK];
+ p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
}
if (p) {
@@ -548,7 +575,7 @@ EXPORT_SYMBOL(idr_for_each);
* A -ENOENT return indicates that @id was not found.
* A -EINVAL return indicates that @id was not within valid constraints.
*
- * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
+ * The caller must serialize with writers.
*/
void *idr_replace(struct idr *idp, void *ptr, int id)
{
@@ -574,13 +601,13 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
return ERR_PTR(-ENOENT);
old_p = p->ary[n];
- p->ary[n] = ptr;
+ rcu_assign_pointer(p->ary[n], ptr);
return old_p;
}
EXPORT_SYMBOL(idr_replace);
-static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
+static void idr_cache_ctor(void *idr_layer)
{
memset(idr_layer, 0, sizeof(struct idr_layer));
}
@@ -694,12 +721,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
restart:
/* get vacant slot */
t = idr_get_empty_slot(&ida->idr, idr_id, pa);
- if (t < 0) {
- if (t == -1)
- return -EAGAIN;
- else /* will be -3 */
- return -ENOSPC;
- }
+ if (t < 0)
+ return _idr_rc_to_errno(t);
if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
return -ENOSPC;
@@ -720,7 +743,8 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
return -EAGAIN;
memset(bitmap, 0, sizeof(struct ida_bitmap));
- pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
+ rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
+ (void *)bitmap);
pa[0]->count++;
}
@@ -749,7 +773,7 @@ int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
* allocation.
*/
if (ida->idr.id_free_cnt || ida->free_bitmap) {
- struct idr_layer *p = alloc_layer(&ida->idr);
+ struct idr_layer *p = get_from_free_list(&ida->idr);
if (p)
kmem_cache_free(idr_layer_cache, p);
}
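
With the RCU conversion, lookups no longer need the writer-side lock. A sketch of the resulting usage pattern (the lock and object handling are illustrative; stored objects must themselves be freed only after an RCU grace period):

    #include <linux/idr.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    static DEFINE_IDR(example_idr);
    static DEFINE_SPINLOCK(example_lock);

    /* Writer: preallocate outside the lock, insert under it. */
    static int example_insert(void *ptr)
    {
            int id, err;

            do {
                    if (!idr_pre_get(&example_idr, GFP_KERNEL))
                            return -ENOMEM;
                    spin_lock(&example_lock);
                    err = idr_get_new(&example_idr, ptr, &id);
                    spin_unlock(&example_lock);
            } while (err == -EAGAIN);

            return err ? err : id;
    }

    /* Reader: rcu_read_lock() is now sufficient for the tree walk. */
    static void *example_lookup(int id)
    {
            void *ptr;

            rcu_read_lock();
            ptr = idr_find(&example_idr, id);
            /* ... use ptr, or take a reference, before unlocking ... */
            rcu_read_unlock();
            return ptr;
    }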
diff --git a/lib/inflate.c b/lib/inflate.c
index 9762294be062..1a8e8a978128 100644
--- a/lib/inflate.c
+++ b/lib/inflate.c
@@ -230,6 +230,45 @@ STATIC const ush mask_bits[] = {
#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
#define DUMPBITS(n) {b>>=(n);k-=(n);}
+#ifndef NO_INFLATE_MALLOC
+/* A trivial malloc implementation, adapted from
+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ */
+
+static unsigned long malloc_ptr;
+static int malloc_count;
+
+static void *malloc(int size)
+{
+ void *p;
+
+ if (size < 0)
+ error("Malloc error");
+ if (!malloc_ptr)
+ malloc_ptr = free_mem_ptr;
+
+ malloc_ptr = (malloc_ptr + 3) & ~3; /* Align */
+
+ p = (void *)malloc_ptr;
+ malloc_ptr += size;
+
+ if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
+ error("Out of memory");
+
+ malloc_count++;
+ return p;
+}
+
+static void free(void *where)
+{
+ malloc_count--;
+ if (!malloc_count)
+ malloc_ptr = free_mem_ptr;
+}
+#else
+#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define free(a) kfree(a)
+#endif
/*
Huffman code decoding is performed using a multi-level table lookup.
@@ -1045,7 +1084,6 @@ STATIC int INIT inflate(void)
int e; /* last block flag */
int r; /* result code */
unsigned h; /* maximum struct huft's malloc'ed */
- void *ptr;
/* initialize window, bit buffer */
wp = 0;
@@ -1057,12 +1095,12 @@ STATIC int INIT inflate(void)
h = 0;
do {
hufts = 0;
- gzip_mark(&ptr);
- if ((r = inflate_block(&e)) != 0) {
- gzip_release(&ptr);
- return r;
- }
- gzip_release(&ptr);
+#ifdef ARCH_HAS_DECOMP_WDOG
+ arch_decomp_wdog();
+#endif
+ r = inflate_block(&e);
+ if (r)
+ return r;
if (hufts > h)
h = hufts;
} while (!e);
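
The (malloc_ptr + 3) & ~3 step in the allocator above is the standard round-up-to-power-of-two idiom; in general form:

    /* Round x up to a power-of-two boundary a:  (x + a - 1) & ~(a - 1)
     * e.g. x = 0x1001, a = 4:  (0x1001 + 3) & ~3 == 0x1004
     */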
diff --git a/lib/iomap.c b/lib/iomap.c
index 37a3ea4cac9f..d32229385151 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -40,8 +40,7 @@ static void bad_io_access(unsigned long port, const char *access)
static int count = 10;
if (count) {
count--;
- printk(KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
- WARN_ON(1);
+ WARN(1, KERN_ERR "Bad IO access at port %#lx (%s)\n", port, access);
}
}
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
index a3b8d4c3f77a..889ddce2021e 100644
--- a/lib/iommu-helper.c
+++ b/lib/iommu-helper.c
@@ -80,3 +80,11 @@ void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
}
}
EXPORT_SYMBOL(iommu_area_free);
+
+unsigned long iommu_num_pages(unsigned long addr, unsigned long len)
+{
+ unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);
+
+ return size >> PAGE_SHIFT;
+}
+EXPORT_SYMBOL(iommu_num_pages);
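
A worked example, assuming 4 KiB pages (PAGE_SHIFT == 12): the count covers the sub-page offset at both ends of the mapping:

    /* addr = 0x12345, len = 0x2000:
     *   addr & ~PAGE_MASK             == 0x345   (offset into first page)
     *   roundup(0x345 + 0x2000, 4096) == 0x3000
     *   0x3000 >> 12                  == 3 pages
     */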
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index cd3e82530b03..01a3c22c1b5a 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -11,79 +11,121 @@
#include <linux/semaphore.h>
/*
- * The 'big kernel semaphore'
+ * The 'big kernel lock'
*
- * This mutex is taken and released recursively by lock_kernel()
+ * This spinlock is taken and released recursively by lock_kernel()
* and unlock_kernel(). It is transparently dropped and reacquired
* over schedule(). It is used to protect legacy code that hasn't
* been migrated to a proper locking design yet.
*
- * Note: code locked by this semaphore will only be serialized against
- * other code using the same locking facility. The code guarantees that
- * the task remains on the same CPU.
- *
* Don't use in new code.
*/
-static DECLARE_MUTEX(kernel_sem);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
+
/*
- * Re-acquire the kernel semaphore.
+ * Acquire/release the underlying lock from the scheduler.
*
- * This function is called with preemption off.
+ * This is called with preemption disabled, and should
+ * return an error value if it cannot get the lock and
+ * TIF_NEED_RESCHED gets set.
*
- * We are executing in schedule() so the code must be extremely careful
- * about recursion, both due to the down() and due to the enabling of
- * preemption. schedule() will re-check the preemption flag after
- * reacquiring the semaphore.
+ * If it successfully gets the lock, it should increment
+ * the preemption count like any spinlock does.
+ *
+ * (This works on UP too - _raw_spin_trylock will never
+ * return false in that case)
*/
int __lockfunc __reacquire_kernel_lock(void)
{
- struct task_struct *task = current;
- int saved_lock_depth = task->lock_depth;
-
- BUG_ON(saved_lock_depth < 0);
-
- task->lock_depth = -1;
- preempt_enable_no_resched();
-
- down(&kernel_sem);
-
+ while (!_raw_spin_trylock(&kernel_flag)) {
+ if (test_thread_flag(TIF_NEED_RESCHED))
+ return -EAGAIN;
+ cpu_relax();
+ }
preempt_disable();
- task->lock_depth = saved_lock_depth;
-
return 0;
}
void __lockfunc __release_kernel_lock(void)
{
- up(&kernel_sem);
+ _raw_spin_unlock(&kernel_flag);
+ preempt_enable_no_resched();
}
/*
- * Getting the big kernel semaphore.
+ * These are the BKL spinlocks - we try to be polite about preemption.
+ * If SMP is not on (ie UP preemption), this all goes away because the
+ * _raw_spin_trylock() will always succeed.
*/
-void __lockfunc lock_kernel(void)
+#ifdef CONFIG_PREEMPT
+static inline void __lock_kernel(void)
{
- struct task_struct *task = current;
- int depth = task->lock_depth + 1;
+ preempt_disable();
+ if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
+ /*
+ * If preemption was disabled even before this
+ * was called, there's nothing we can be polite
+ * about - just spin.
+ */
+ if (preempt_count() > 1) {
+ _raw_spin_lock(&kernel_flag);
+ return;
+ }
- if (likely(!depth))
/*
- * No recursion worries - we set up lock_depth _after_
+ * Otherwise, let's wait for the kernel lock
+ * with preemption enabled..
*/
- down(&kernel_sem);
+ do {
+ preempt_enable();
+ while (spin_is_locked(&kernel_flag))
+ cpu_relax();
+ preempt_disable();
+ } while (!_raw_spin_trylock(&kernel_flag));
+ }
+}
- task->lock_depth = depth;
+#else
+
+/*
+ * Non-preemption case - just get the spinlock
+ */
+static inline void __lock_kernel(void)
+{
+ _raw_spin_lock(&kernel_flag);
}
+#endif
-void __lockfunc unlock_kernel(void)
+static inline void __unlock_kernel(void)
{
- struct task_struct *task = current;
+ /*
+ * the BKL is not covered by lockdep, so we open-code the
+ * unlocking sequence (and thus avoid the dep-chain ops):
+ */
+ _raw_spin_unlock(&kernel_flag);
+ preempt_enable();
+}
- BUG_ON(task->lock_depth < 0);
+/*
+ * Getting the big kernel lock.
+ *
+ * This cannot happen asynchronously, so we only need to
+ * worry about other CPUs.
+ */
+void __lockfunc lock_kernel(void)
+{
+ int depth = current->lock_depth+1;
+ if (likely(!depth))
+ __lock_kernel();
+ current->lock_depth = depth;
+}
- if (likely(--task->lock_depth < 0))
- up(&kernel_sem);
+void __lockfunc unlock_kernel(void)
+{
+ BUG_ON(current->lock_depth < 0);
+ if (likely(--current->lock_depth < 0))
+ __unlock_kernel();
}
EXPORT_SYMBOL(lock_kernel);
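
For reference, a sketch of how the recursion accounting plays out for a nested caller (illustrative only; the BKL remains deprecated for new code):

    #include <linux/smp_lock.h>

    static void example_nested_bkl(void)
    {
            lock_kernel();    /* lock_depth -1 -> 0: takes kernel_flag  */
            lock_kernel();    /* lock_depth  0 -> 1: recursion, no spin */
            /* ... legacy code ... */
            unlock_kernel();  /* lock_depth  1 -> 0: still held         */
            unlock_kernel();  /* lock_depth  0 -> -1: releases the lock */
    }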
diff --git a/lib/kobject.c b/lib/kobject.c
index 718e5101c263..bd732ffebc85 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -164,9 +164,8 @@ static int kobject_add_internal(struct kobject *kobj)
return -ENOENT;
if (!kobj->name || !kobj->name[0]) {
- pr_debug("kobject: (%p): attempted to be registered with empty "
+ WARN(1, "kobject: (%p): attempted to be registered with empty "
"name!\n", kobj);
- WARN_ON(1);
return -EINVAL;
}
@@ -216,13 +215,19 @@ static int kobject_add_internal(struct kobject *kobj)
static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
va_list vargs)
{
- /* Free the old name, if necessary. */
- kfree(kobj->name);
+ const char *old_name = kobj->name;
+ char *s;
kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
if (!kobj->name)
return -ENOMEM;
+ /* ewww... some of these buggers have '/' in the name ... */
+ s = strchr(kobj->name, '/');
+ if (s)
+ s[0] = '!';
+
+ kfree(old_name);
return 0;
}
@@ -439,6 +444,7 @@ out:
return error;
}
+EXPORT_SYMBOL_GPL(kobject_rename);
/**
* kobject_move - move object to another parent
@@ -576,12 +582,10 @@ static void kobject_release(struct kref *kref)
void kobject_put(struct kobject *kobj)
{
if (kobj) {
- if (!kobj->state_initialized) {
- printk(KERN_WARNING "kobject: '%s' (%p): is not "
+ if (!kobj->state_initialized)
+ WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
"initialized, yet kobject_put() is being "
"called.\n", kobject_name(kobj), kobj);
- WARN_ON(1);
- }
kref_put(&kobj->kref, kobject_release);
}
}
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2fa545a63160..3f914725bda8 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -245,7 +245,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
if (retval)
goto exit;
- call_usermodehelper(argv[0], argv, env->envp, UMH_WAIT_EXEC);
+ retval = call_usermodehelper(argv[0], argv,
+ env->envp, UMH_WAIT_EXEC);
}
exit:
@@ -284,8 +285,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
int len;
if (env->envp_idx >= ARRAY_SIZE(env->envp)) {
- printk(KERN_ERR "add_uevent_var: too many keys\n");
- WARN_ON(1);
+ WARN(1, KERN_ERR "add_uevent_var: too many keys\n");
return -ENOMEM;
}
@@ -296,8 +296,7 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
va_end(args);
if (len >= (sizeof(env->buf) - env->buflen)) {
- printk(KERN_ERR "add_uevent_var: buffer size too small\n");
- WARN_ON(1);
+ WARN(1, KERN_ERR "add_uevent_var: buffer size too small\n");
return -ENOMEM;
}
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 4350ba9655bd..1a39f4e3ae1f 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -20,18 +20,14 @@ void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
- if (unlikely(next->prev != prev)) {
- printk(KERN_ERR "list_add corruption. next->prev should be "
- "prev (%p), but was %p. (next=%p).\n",
- prev, next->prev, next);
- BUG();
- }
- if (unlikely(prev->next != next)) {
- printk(KERN_ERR "list_add corruption. prev->next should be "
- "next (%p), but was %p. (prev=%p).\n",
- next, prev->next, prev);
- BUG();
- }
+ WARN(next->prev != prev,
+ "list_add corruption. next->prev should be "
+ "prev (%p), but was %p. (next=%p).\n",
+ prev, next->prev, next);
+ WARN(prev->next != next,
+ "list_add corruption. prev->next should be "
+ "next (%p), but was %p. (prev=%p).\n",
+ next, prev->next, prev);
next->prev = new;
new->next = next;
new->prev = prev;
@@ -40,20 +36,6 @@ void __list_add(struct list_head *new,
EXPORT_SYMBOL(__list_add);
/**
- * list_add - add a new entry
- * @new: new entry to be added
- * @head: list head to add it after
- *
- * Insert a new entry after the specified head.
- * This is good for implementing stacks.
- */
-void list_add(struct list_head *new, struct list_head *head)
-{
- __list_add(new, head, head->next);
-}
-EXPORT_SYMBOL(list_add);
-
-/**
* list_del - deletes entry from list.
* @entry: the element to delete from the list.
* Note: list_empty on entry does not return true after this, the entry is
@@ -61,16 +43,12 @@ EXPORT_SYMBOL(list_add);
*/
void list_del(struct list_head *entry)
{
- if (unlikely(entry->prev->next != entry)) {
- printk(KERN_ERR "list_del corruption. prev->next should be %p, "
- "but was %p\n", entry, entry->prev->next);
- BUG();
- }
- if (unlikely(entry->next->prev != entry)) {
- printk(KERN_ERR "list_del corruption. next->prev should be %p, "
- "but was %p\n", entry, entry->next->prev);
- BUG();
- }
+ WARN(entry->prev->next != entry,
+ "list_del corruption. prev->next should be %p, "
+ "but was %p\n", entry, entry->prev->next);
+ WARN(entry->next->prev != entry,
+ "list_del corruption. next->prev should be %p, "
+ "but was %p\n", entry, entry->next->prev);
__list_del(entry->prev, entry->next);
entry->next = LIST_POISON1;
entry->prev = LIST_POISON2;
diff --git a/lib/lmb.c b/lib/lmb.c
index 83287d3869a3..5d7b9286503e 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -19,31 +19,43 @@
struct lmb lmb;
+static int lmb_debug;
+
+static int __init early_lmb(char *p)
+{
+ if (p && strstr(p, "debug"))
+ lmb_debug = 1;
+ return 0;
+}
+early_param("lmb", early_lmb);
+
void lmb_dump_all(void)
{
-#ifdef DEBUG
unsigned long i;
- pr_debug("lmb_dump_all:\n");
- pr_debug(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
- pr_debug(" memory.size = 0x%llx\n",
+ if (!lmb_debug)
+ return;
+
+ pr_info("lmb_dump_all:\n");
+ pr_info(" memory.cnt = 0x%lx\n", lmb.memory.cnt);
+ pr_info(" memory.size = 0x%llx\n",
(unsigned long long)lmb.memory.size);
for (i=0; i < lmb.memory.cnt ;i++) {
- pr_debug(" memory.region[0x%x].base = 0x%llx\n",
+ pr_info(" memory.region[0x%lx].base = 0x%llx\n",
i, (unsigned long long)lmb.memory.region[i].base);
- pr_debug(" .size = 0x%llx\n",
+ pr_info(" .size = 0x%llx\n",
(unsigned long long)lmb.memory.region[i].size);
}
- pr_debug(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
- pr_debug(" reserved.size = 0x%lx\n", lmb.reserved.size);
+ pr_info(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt);
+ pr_info(" reserved.size = 0x%llx\n",
+ (unsigned long long)lmb.reserved.size);
for (i=0; i < lmb.reserved.cnt ;i++) {
- pr_debug(" reserved.region[0x%x].base = 0x%llx\n",
+ pr_info(" reserved.region[0x%lx].base = 0x%llx\n",
i, (unsigned long long)lmb.reserved.region[i].base);
- pr_debug(" .size = 0x%llx\n",
+ pr_info(" .size = 0x%llx\n",
(unsigned long long)lmb.reserved.region[i].size);
}
-#endif /* DEBUG */
}
static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
@@ -286,8 +298,7 @@ static u64 __init lmb_alloc_nid_unreserved(u64 start, u64 end,
j = lmb_overlaps_region(&lmb.reserved, base, size);
if (j < 0) {
/* this area isn't reserved, take it */
- if (lmb_add_region(&lmb.reserved, base,
- lmb_align_up(size, align)) < 0)
+ if (lmb_add_region(&lmb.reserved, base, size) < 0)
base = ~(u64)0;
return base;
}
@@ -333,6 +344,10 @@ u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
struct lmb_region *mem = &lmb.memory;
int i;
+ BUG_ON(0 == size);
+
+ size = lmb_align_up(size, align);
+
for (i = 0; i < mem->cnt; i++) {
u64 ret = lmb_alloc_nid_region(&mem->region[i],
nid_range,
@@ -370,6 +385,8 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
BUG_ON(0 == size);
+ size = lmb_align_up(size, align);
+
/* On some platforms, make sure we allocate lowmem */
/* Note that LMB_REAL_LIMIT may be LMB_ALLOC_ANYWHERE */
if (max_addr == LMB_ALLOC_ANYWHERE)
@@ -393,8 +410,7 @@ u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr)
j = lmb_overlaps_region(&lmb.reserved, base, size);
if (j < 0) {
/* this area isn't reserved, take it */
- if (lmb_add_region(&lmb.reserved, base,
- lmb_align_up(size, align)) < 0)
+ if (lmb_add_region(&lmb.reserved, base, size) < 0)
return 0;
return base;
}
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c
index 77f0f9b775a9..5dc6b29c1575 100644
--- a/lib/lzo/lzo1x_decompress.c
+++ b/lib/lzo/lzo1x_decompress.c
@@ -138,8 +138,7 @@ match:
t += 31 + *ip++;
}
m_pos = op - 1;
- m_pos -= le16_to_cpu(get_unaligned(
- (const unsigned short *)ip)) >> 2;
+ m_pos -= get_unaligned_le16(ip) >> 2;
ip += 2;
} else if (t >= 16) {
m_pos = op;
@@ -157,8 +156,7 @@ match:
}
t += 7 + *ip++;
}
- m_pos -= le16_to_cpu(get_unaligned(
- (const unsigned short *)ip)) >> 2;
+ m_pos -= get_unaligned_le16(ip) >> 2;
ip += 2;
if (m_pos == op)
goto eof_found;
diff --git a/lib/parser.c b/lib/parser.c
index 703c8c13b346..4f0cbc03e0e8 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -182,18 +182,25 @@ int match_hex(substring_t *s, int *result)
}
/**
- * match_strcpy: - copies the characters from a substring_t to a string
- * @to: string to copy characters to.
- * @s: &substring_t to copy
+ * match_strlcpy: - Copy the characters from a substring_t to a sized buffer
+ * @dest: where to copy to
+ * @src: &substring_t to copy
+ * @size: size of destination buffer
*
- * Description: Copies the set of characters represented by the given
- * &substring_t @s to the c-style string @to. Caller guarantees that @to is
- * large enough to hold the characters of @s.
+ * Description: Copy the characters in &substring_t @src to the
+ * c-style string @dest. Copy no more than @size - 1 characters, plus
+ * the terminating NUL. Return length of @src.
*/
-void match_strcpy(char *to, const substring_t *s)
+size_t match_strlcpy(char *dest, const substring_t *src, size_t size)
{
- memcpy(to, s->from, s->to - s->from);
- to[s->to - s->from] = '\0';
+ size_t ret = src->to - src->from;
+
+ if (size) {
+ size_t len = ret >= size ? size - 1 : ret;
+ memcpy(dest, src->from, len);
+ dest[len] = '\0';
+ }
+ return ret;
}
/**
@@ -206,9 +213,10 @@ void match_strcpy(char *to, const substring_t *s)
*/
char *match_strdup(const substring_t *s)
{
- char *p = kmalloc(s->to - s->from + 1, GFP_KERNEL);
+ size_t sz = s->to - s->from + 1;
+ char *p = kmalloc(sz, GFP_KERNEL);
if (p)
- match_strcpy(p, s);
+ match_strlcpy(p, s, sz);
return p;
}
@@ -216,5 +224,5 @@ EXPORT_SYMBOL(match_token);
EXPORT_SYMBOL(match_int);
EXPORT_SYMBOL(match_octal);
EXPORT_SYMBOL(match_hex);
-EXPORT_SYMBOL(match_strcpy);
+EXPORT_SYMBOL(match_strlcpy);
EXPORT_SYMBOL(match_strdup);
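
Like strlcpy(), the new helper returns the full source length, so callers can detect truncation. A sketch (the buffer size is arbitrary):

    #include <linux/errno.h>
    #include <linux/parser.h>

    static int example_copy_option(const substring_t *s)
    {
            char buf[16];

            if (match_strlcpy(buf, s, sizeof(buf)) >= sizeof(buf))
                    return -ENAMETOOLONG;   /* value did not fit */
            return 0;
    }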
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 119174494cb5..4a8ba4bf5f6f 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
* Add up all the per-cpu counts, return the result. This is a more accurate
* but much slower version of percpu_counter_read_positive()
*/
-s64 __percpu_counter_sum(struct percpu_counter *fbc)
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
{
s64 ret;
int cpu;
@@ -62,7 +62,12 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
for_each_online_cpu(cpu) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
+ if (set)
+ *pcount = 0;
}
+ if (set)
+ fbc->count = ret;
+
spin_unlock(&fbc->lock);
return ret;
}
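
With a non-zero @set, the sum is written back to fbc->count and the per-cpu deltas are cleared, re-basing the counter. A minimal sketch:

    #include <linux/percpu_counter.h>

    static s64 example_exact_read(struct percpu_counter *fbc)
    {
            /* set != 0: fold per-cpu deltas into fbc->count, zero them */
            return __percpu_counter_sum(fbc, 1);
    }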
diff --git a/lib/plist.c b/lib/plist.c
index 3074a02272f3..d6c64a824e1d 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -31,12 +31,13 @@
static void plist_check_prev_next(struct list_head *t, struct list_head *p,
struct list_head *n)
{
- if (n->prev != p || p->next != n) {
- printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev);
- printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev);
- printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev);
- WARN_ON(1);
- }
+ WARN(n->prev != p || p->next != n,
+ "top: %p, n: %p, p: %p\n"
+ "prev: %p, n: %p, p: %p\n"
+ "next: %p, n: %p, p: %p\n",
+ t, t->next, t->prev,
+ p, p->next, p->prev,
+ n, n->next, n->prev);
}
static void plist_check_list(struct list_head *top)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index bd521716ab1a..be86b32bc874 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 2001 Momchil Velikov
* Portions Copyright (C) 2001 Christoph Hellwig
- * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
+ * Copyright (C) 2005 SGI, Christoph Lameter
* Copyright (C) 2006 Nick Piggin
*
* This program is free software; you can redistribute it and/or
@@ -88,6 +88,57 @@ static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
return root->gfp_mask & __GFP_BITS_MASK;
}
+static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
+ int offset)
+{
+ __set_bit(offset, node->tags[tag]);
+}
+
+static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
+ int offset)
+{
+ __clear_bit(offset, node->tags[tag]);
+}
+
+static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
+ int offset)
+{
+ return test_bit(offset, node->tags[tag]);
+}
+
+static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
+{
+ root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
+{
+ root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
+}
+
+static inline void root_tag_clear_all(struct radix_tree_root *root)
+{
+ root->gfp_mask &= __GFP_BITS_MASK;
+}
+
+static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
+{
+ return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
+}
+
+/*
+ * Returns 1 if any slot in the node has this tag set.
+ * Otherwise returns 0.
+ */
+static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
+{
+ int idx;
+ for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
+ if (node->tags[tag][idx])
+ return 1;
+ }
+ return 0;
+}
/*
* This assumes that the caller has performed appropriate preallocation, and
* that the caller has pinned this thread of control to the current CPU.
@@ -124,6 +175,17 @@ static void radix_tree_node_rcu_free(struct rcu_head *head)
{
struct radix_tree_node *node =
container_of(head, struct radix_tree_node, rcu_head);
+
+ /*
+ * must only free zeroed nodes into the slab. radix_tree_shrink
+ * can leave us with a non-NULL entry in the first slot, so clear
+ * that here to make sure.
+ */
+ tag_clear(node, 0, 0);
+ tag_clear(node, 1, 0);
+ node->slots[0] = NULL;
+ node->count = 0;
+
kmem_cache_free(radix_tree_node_cachep, node);
}
@@ -165,59 +227,6 @@ out:
}
EXPORT_SYMBOL(radix_tree_preload);
-static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
- int offset)
-{
- __set_bit(offset, node->tags[tag]);
-}
-
-static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
- int offset)
-{
- __clear_bit(offset, node->tags[tag]);
-}
-
-static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
- int offset)
-{
- return test_bit(offset, node->tags[tag]);
-}
-
-static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
-{
- root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
-}
-
-
-static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
-{
- root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
-}
-
-static inline void root_tag_clear_all(struct radix_tree_root *root)
-{
- root->gfp_mask &= __GFP_BITS_MASK;
-}
-
-static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
-{
- return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
-}
-
-/*
- * Returns 1 if any slot in the node has this tag set.
- * Otherwise returns 0.
- */
-static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
-{
- int idx;
- for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
- if (node->tags[tag][idx])
- return 1;
- }
- return 0;
-}
-
/*
* Return the maximum key which can be store into a
* radix tree with height HEIGHT.
@@ -350,18 +359,17 @@ EXPORT_SYMBOL(radix_tree_insert);
* Returns: the slot corresponding to the position @index in the
* radix tree @root. This is useful for update-if-exists operations.
*
- * This function cannot be called under rcu_read_lock, it must be
- * excluded from writers, as must the returned slot for subsequent
- * use by radix_tree_deref_slot() and radix_tree_replace slot.
- * Caller must hold tree write locked across slot lookup and
- * replace.
+ * This function can be called under rcu_read_lock iff the slot is not
+ * modified by radix_tree_replace_slot, otherwise it must be called
+ * exclusive from other writers. Any dereference of the slot must be done
+ * using radix_tree_deref_slot.
*/
void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
{
unsigned int height, shift;
struct radix_tree_node *node, **slot;
- node = root->rnode;
+ node = rcu_dereference(root->rnode);
if (node == NULL)
return NULL;
@@ -381,7 +389,7 @@ void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
do {
slot = (struct radix_tree_node **)
(node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK));
- node = *slot;
+ node = rcu_dereference(*slot);
if (node == NULL)
return NULL;
@@ -658,7 +666,7 @@ unsigned long radix_tree_next_hole(struct radix_tree_root *root,
EXPORT_SYMBOL(radix_tree_next_hole);
static unsigned int
-__lookup(struct radix_tree_node *slot, void **results, unsigned long index,
+__lookup(struct radix_tree_node *slot, void ***results, unsigned long index,
unsigned int max_items, unsigned long *next_index)
{
unsigned int nr_found = 0;
@@ -692,11 +700,9 @@ __lookup(struct radix_tree_node *slot, void **results, unsigned long index,
/* Bottom level: grab some items */
for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
- struct radix_tree_node *node;
index++;
- node = slot->slots[i];
- if (node) {
- results[nr_found++] = rcu_dereference(node);
+ if (slot->slots[i]) {
+ results[nr_found++] = &(slot->slots[i]);
if (nr_found == max_items)
goto out;
}
@@ -750,13 +756,22 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
ret = 0;
while (ret < max_items) {
- unsigned int nr_found;
+ unsigned int nr_found, slots_found, i;
unsigned long next_index; /* Index of next search */
if (cur_index > max_index)
break;
- nr_found = __lookup(node, results + ret, cur_index,
+ slots_found = __lookup(node, (void ***)results + ret, cur_index,
max_items - ret, &next_index);
+ nr_found = 0;
+ for (i = 0; i < slots_found; i++) {
+ struct radix_tree_node *slot;
+ slot = *(((void ***)results)[ret + i]);
+ if (!slot)
+ continue;
+ results[ret + nr_found] = rcu_dereference(slot);
+ nr_found++;
+ }
ret += nr_found;
if (next_index == 0)
break;
@@ -767,12 +782,71 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
}
EXPORT_SYMBOL(radix_tree_gang_lookup);
+/**
+ * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
+ * @root: radix tree root
+ * @results: where the results of the lookup are placed
+ * @first_index: start the lookup from this key
+ * @max_items: place up to this many items at *results
+ *
+ * Performs an index-ascending scan of the tree for present items. Places
+ * their slots at *@results and returns the number of items which were
+ * placed at *@results.
+ *
+ * The implementation is naive.
+ *
+ * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
+ * be dereferenced with radix_tree_deref_slot, and if using only RCU
+ * protection, radix_tree_deref_slot may fail requiring a retry.
+ */
+unsigned int
+radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results,
+ unsigned long first_index, unsigned int max_items)
+{
+ unsigned long max_index;
+ struct radix_tree_node *node;
+ unsigned long cur_index = first_index;
+ unsigned int ret;
+
+ node = rcu_dereference(root->rnode);
+ if (!node)
+ return 0;
+
+ if (!radix_tree_is_indirect_ptr(node)) {
+ if (first_index > 0)
+ return 0;
+ results[0] = (void **)&root->rnode;
+ return 1;
+ }
+ node = radix_tree_indirect_to_ptr(node);
+
+ max_index = radix_tree_maxindex(node->height);
+
+ ret = 0;
+ while (ret < max_items) {
+ unsigned int slots_found;
+ unsigned long next_index; /* Index of next search */
+
+ if (cur_index > max_index)
+ break;
+ slots_found = __lookup(node, results + ret, cur_index,
+ max_items - ret, &next_index);
+ ret += slots_found;
+ if (next_index == 0)
+ break;
+ cur_index = next_index;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
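
Under RCU-only protection, each returned slot must then be dereferenced with radix_tree_deref_slot(), which can observe a concurrent deletion; a minimal sketch (the tree and handler are hypothetical):

    void **slots[16];
    unsigned int i, n;

    rcu_read_lock();
    n = radix_tree_gang_lookup_slot(&my_tree, slots, 0, 16);
    for (i = 0; i < n; i++) {
    	void *item = radix_tree_deref_slot(slots[i]);

    	if (!item)	/* deleted since the lookup, skip */
    		continue;
    	handle_item(item);	/* hypothetical handler */
    }
    rcu_read_unlock();
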
+
/*
* FIXME: the two tag_get()s here should use find_next_bit() instead of
* open-coding the search.
*/
static unsigned int
-__lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
+__lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index,
unsigned int max_items, unsigned long *next_index, unsigned int tag)
{
unsigned int nr_found = 0;
@@ -802,11 +876,9 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
unsigned long j = index & RADIX_TREE_MAP_MASK;
for ( ; j < RADIX_TREE_MAP_SIZE; j++) {
- struct radix_tree_node *node;
index++;
if (!tag_get(slot, tag, j))
continue;
- node = slot->slots[j];
/*
* Even though the tag was found set, we need to
* recheck that we have a non-NULL node, because
@@ -817,9 +889,8 @@ __lookup_tag(struct radix_tree_node *slot, void **results, unsigned long index,
* lookup ->slots[x] without a lock (ie. can't
* rely on its value remaining the same).
*/
- if (node) {
- node = rcu_dereference(node);
- results[nr_found++] = node;
+ if (slot->slots[j]) {
+ results[nr_found++] = &(slot->slots[j]);
if (nr_found == max_items)
goto out;
}
@@ -878,13 +949,22 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
ret = 0;
while (ret < max_items) {
- unsigned int nr_found;
+ unsigned int nr_found, slots_found, i;
unsigned long next_index; /* Index of next search */
if (cur_index > max_index)
break;
- nr_found = __lookup_tag(node, results + ret, cur_index,
- max_items - ret, &next_index, tag);
+ slots_found = __lookup_tag(node, (void ***)results + ret,
+ cur_index, max_items - ret, &next_index, tag);
+ nr_found = 0;
+ for (i = 0; i < slots_found; i++) {
+ struct radix_tree_node *slot;
+ slot = *(((void ***)results)[ret + i]);
+ if (!slot)
+ continue;
+ results[ret + nr_found] = rcu_dereference(slot);
+ nr_found++;
+ }
ret += nr_found;
if (next_index == 0)
break;
@@ -896,6 +976,67 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
/**
+ * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
+ * radix tree based on a tag
+ * @root: radix tree root
+ * @results: where the results of the lookup are placed
+ * @first_index: start the lookup from this key
+ * @max_items: place up to this many items at *results
+ * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
+ *
+ * Performs an index-ascending scan of the tree for present items which
+ * have the tag indexed by @tag set. Places the slots at *@results and
+ * returns the number of slots which were placed at *@results.
+ */
+unsigned int
+radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
+ unsigned long first_index, unsigned int max_items,
+ unsigned int tag)
+{
+ struct radix_tree_node *node;
+ unsigned long max_index;
+ unsigned long cur_index = first_index;
+ unsigned int ret;
+
+ /* check the root's tag bit */
+ if (!root_tag_get(root, tag))
+ return 0;
+
+ node = rcu_dereference(root->rnode);
+ if (!node)
+ return 0;
+
+ if (!radix_tree_is_indirect_ptr(node)) {
+ if (first_index > 0)
+ return 0;
+ results[0] = (void **)&root->rnode;
+ return 1;
+ }
+ node = radix_tree_indirect_to_ptr(node);
+
+ max_index = radix_tree_maxindex(node->height);
+
+ ret = 0;
+ while (ret < max_items) {
+ unsigned int slots_found;
+ unsigned long next_index; /* Index of next search */
+
+ if (cur_index > max_index)
+ break;
+ slots_found = __lookup_tag(node, results + ret,
+ cur_index, max_items - ret, &next_index, tag);
+ ret += slots_found;
+ if (next_index == 0)
+ break;
+ cur_index = next_index;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
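
Usage mirrors radix_tree_gang_lookup_slot() above, with the additional tag filter; for example, under rcu_read_lock() and reusing the slots array from the sketch above:

    n = radix_tree_gang_lookup_tag_slot(&my_tree, slots, 0, 16,
    				    0 /* hypothetical tag index */);
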
+
+
+/**
* radix_tree_shrink - shrink height of a radix tree to minimal
* @root radix tree root
*/
@@ -930,11 +1071,6 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
newptr = radix_tree_ptr_to_indirect(newptr);
root->rnode = newptr;
root->height--;
- /* must only free zeroed nodes into the slab */
- tag_clear(to_free, 0, 0);
- tag_clear(to_free, 1, 0);
- to_free->slots[0] = NULL;
- to_free->count = 0;
radix_tree_node_free(to_free);
}
}
@@ -1047,7 +1183,7 @@ int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
EXPORT_SYMBOL(radix_tree_tagged);
static void
-radix_tree_node_ctor(struct kmem_cache *cachep, void *node)
+radix_tree_node_ctor(void *node)
{
memset(node, 0, sizeof(struct radix_tree_node));
}
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 485e3040dcd4..26187edcc7ea 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -3,6 +3,9 @@
*
* Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
*
+ * 2008-05-01: rewrote the function to take a ratelimit_state struct as
+ * its parameter, so every user can have their own standalone ratelimit_state.
+ *
* This file is released under the GPLv2.
*
*/
@@ -11,41 +14,44 @@
#include <linux/jiffies.h>
#include <linux/module.h>
+static DEFINE_SPINLOCK(ratelimit_lock);
+
/*
* __ratelimit - rate limiting
- * @ratelimit_jiffies: minimum time in jiffies between two callbacks
- * @ratelimit_burst: number of callbacks we do before ratelimiting
+ * @rs: ratelimit_state data
*
- * This enforces a rate limit: not more than @ratelimit_burst callbacks
- * in every ratelimit_jiffies
+ * This enforces a rate limit: not more than @rs->burst callbacks
+ * in every @rs->interval jiffies
*/
-int __ratelimit(int ratelimit_jiffies, int ratelimit_burst)
+int __ratelimit(struct ratelimit_state *rs)
{
- static DEFINE_SPINLOCK(ratelimit_lock);
- static unsigned toks = 10 * 5 * HZ;
- static unsigned long last_msg;
- static int missed;
unsigned long flags;
- unsigned long now = jiffies;
- spin_lock_irqsave(&ratelimit_lock, flags);
- toks += now - last_msg;
- last_msg = now;
- if (toks > (ratelimit_burst * ratelimit_jiffies))
- toks = ratelimit_burst * ratelimit_jiffies;
- if (toks >= ratelimit_jiffies) {
- int lost = missed;
-
- missed = 0;
- toks -= ratelimit_jiffies;
- spin_unlock_irqrestore(&ratelimit_lock, flags);
- if (lost)
- printk(KERN_WARNING "%s: %d messages suppressed\n",
- __func__, lost);
+ if (!rs->interval)
return 1;
+
+ spin_lock_irqsave(&ratelimit_lock, flags);
+ if (!rs->begin)
+ rs->begin = jiffies;
+
+ if (time_is_before_jiffies(rs->begin + rs->interval)) {
+ if (rs->missed)
+ printk(KERN_WARNING "%s: %d callbacks suppressed\n",
+ __func__, rs->missed);
+ rs->begin = 0;
+ rs->printed = 0;
+ rs->missed = 0;
}
- missed++;
+ if (rs->burst && rs->burst > rs->printed)
+ goto print;
+
+ rs->missed++;
spin_unlock_irqrestore(&ratelimit_lock, flags);
return 0;
+
+print:
+ rs->printed++;
+ spin_unlock_irqrestore(&ratelimit_lock, flags);
+ return 1;
}
EXPORT_SYMBOL(__ratelimit);
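
With the state now caller-owned, every call site can declare its own limiter. A minimal sketch, assuming the DEFINE_RATELIMIT_STATE() helper from <linux/ratelimit.h> that fills in the interval and burst fields:

    #include <linux/ratelimit.h>

    /* At most 10 warnings per 5*HZ window at this call site. */
    static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);

    if (__ratelimit(&my_rs))
    	printk(KERN_WARNING "device wedged, resetting\n");
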
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b80c21100d78..876ba6d5b670 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -295,6 +295,117 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
EXPORT_SYMBOL(sg_alloc_table);
/**
+ * sg_miter_start - start mapping iteration over a sg list
+ * @miter: sg mapping iter to be started
+ * @sgl: sg list to iterate over
+ * @nents: number of sg entries
+ * @flags: SG_MITER_* flags
+ *
+ * Description:
+ * Starts mapping iterator @miter.
+ *
+ * Context:
+ * Don't care.
+ */
+void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
+ unsigned int nents, unsigned int flags)
+{
+ memset(miter, 0, sizeof(struct sg_mapping_iter));
+
+ miter->__sg = sgl;
+ miter->__nents = nents;
+ miter->__offset = 0;
+ miter->__flags = flags;
+}
+EXPORT_SYMBOL(sg_miter_start);
+
+/**
+ * sg_miter_next - proceed mapping iterator to the next mapping
+ * @miter: sg mapping iter to proceed
+ *
+ * Description:
+ * Proceeds @miter to the next mapping. @miter should have been
+ * started using sg_miter_start(). On successful return,
+ * @miter->page, @miter->addr and @miter->length point to the
+ * current mapping.
+ *
+ * Context:
+ * IRQ disabled if SG_MITER_ATOMIC. IRQ must stay disabled till
+ * @miter is stopped. May sleep if !SG_MITER_ATOMIC.
+ *
+ * Returns:
+ * true if @miter contains the next mapping. false if end of sg
+ * list is reached.
+ */
+bool sg_miter_next(struct sg_mapping_iter *miter)
+{
+ unsigned int off, len;
+
+ /* check for end and drop resources from the last iteration */
+ if (!miter->__nents)
+ return false;
+
+ sg_miter_stop(miter);
+
+ /* get to the next sg if necessary. __offset is adjusted by stop */
+ if (miter->__offset == miter->__sg->length && --miter->__nents) {
+ miter->__sg = sg_next(miter->__sg);
+ miter->__offset = 0;
+ }
+
+ /* map the next page */
+ off = miter->__sg->offset + miter->__offset;
+ len = miter->__sg->length - miter->__offset;
+
+ miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
+ off &= ~PAGE_MASK;
+ miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
+ miter->consumed = miter->length;
+
+ if (miter->__flags & SG_MITER_ATOMIC)
+ miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
+ else
+ miter->addr = kmap(miter->page) + off;
+
+ return true;
+}
+EXPORT_SYMBOL(sg_miter_next);
+
+/**
+ * sg_miter_stop - stop mapping iteration
+ * @miter: sg mapping iter to be stopped
+ *
+ * Description:
+ * Stops mapping iterator @miter. @miter should have been started
+ * using sg_miter_start(). A stopped iteration can be
+ * resumed by calling sg_miter_next() on it. This is useful when
+ * resources (kmap) need to be released during iteration.
+ *
+ * Context:
+ * IRQ disabled if SG_MITER_ATOMIC is set. Don't care otherwise.
+ */
+void sg_miter_stop(struct sg_mapping_iter *miter)
+{
+ WARN_ON(miter->consumed > miter->length);
+
+ /* drop resources from the last iteration */
+ if (miter->addr) {
+ miter->__offset += miter->consumed;
+
+ if (miter->__flags & SG_MITER_ATOMIC) {
+ WARN_ON(!irqs_disabled());
+ kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
+ } else
+ kunmap(miter->addr);
+
+ miter->page = NULL;
+ miter->addr = NULL;
+ miter->length = 0;
+ miter->consumed = 0;
+ }
+}
+EXPORT_SYMBOL(sg_miter_stop);
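
Together, the three calls walk an sg list one page-sized mapping at a time; a minimal sketch that zeroes a list (the sgl/nents setup is assumed elsewhere, and flags 0 selects the sleeping kmap() path rather than SG_MITER_ATOMIC):

    struct sg_mapping_iter miter;

    sg_miter_start(&miter, sgl, nents, 0);
    while (sg_miter_next(&miter))
    	memset(miter.addr, 0, miter.length);	/* one mapping at a time */
    sg_miter_stop(&miter);
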
+
+/**
* sg_copy_buffer - Copy data between a linear buffer and an SG list
* @sgl: The SG list
* @nents: Number of SG entries
@@ -309,56 +420,29 @@ EXPORT_SYMBOL(sg_alloc_table);
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, int to_buffer)
{
- struct scatterlist *sg;
- size_t buf_off = 0;
- int i;
-
- WARN_ON(!irqs_disabled());
-
- for_each_sg(sgl, sg, nents, i) {
- struct page *page;
- int n = 0;
- unsigned int sg_off = sg->offset;
- unsigned int sg_copy = sg->length;
-
- if (sg_copy > buflen)
- sg_copy = buflen;
- buflen -= sg_copy;
-
- while (sg_copy > 0) {
- unsigned int page_copy;
- void *p;
-
- page_copy = PAGE_SIZE - sg_off;
- if (page_copy > sg_copy)
- page_copy = sg_copy;
-
- page = nth_page(sg_page(sg), n);
- p = kmap_atomic(page, KM_BIO_SRC_IRQ);
-
- if (to_buffer)
- memcpy(buf + buf_off, p + sg_off, page_copy);
- else {
- memcpy(p + sg_off, buf + buf_off, page_copy);
- flush_kernel_dcache_page(page);
- }
-
- kunmap_atomic(p, KM_BIO_SRC_IRQ);
-
- buf_off += page_copy;
- sg_off += page_copy;
- if (sg_off == PAGE_SIZE) {
- sg_off = 0;
- n++;
- }
- sg_copy -= page_copy;
+ unsigned int offset = 0;
+ struct sg_mapping_iter miter;
+
+ sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
+
+ while (sg_miter_next(&miter) && offset < buflen) {
+ unsigned int len;
+
+ len = min(miter.length, buflen - offset);
+
+ if (to_buffer)
+ memcpy(buf + offset, miter.addr, len);
+ else {
+ memcpy(miter.addr, buf + offset, len);
+ flush_kernel_dcache_page(miter.page);
}
- if (!buflen)
- break;
+ offset += len;
}
- return buf_off;
+ sg_miter_stop(&miter);
+
+ return offset;
}
/**
diff --git a/lib/show_mem.c b/lib/show_mem.c
new file mode 100644
index 000000000000..238e72a18ce1
--- /dev/null
+++ b/lib/show_mem.c
@@ -0,0 +1,63 @@
+/*
+ * Generic show_mem() implementation
+ *
+ * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
+ * All code subject to the GPL version 2.
+ */
+
+#include <linux/mm.h>
+#include <linux/nmi.h>
+#include <linux/quicklist.h>
+
+void show_mem(void)
+{
+ pg_data_t *pgdat;
+ unsigned long total = 0, reserved = 0, shared = 0,
+ nonshared = 0, highmem = 0;
+
+ printk(KERN_INFO "Mem-Info:\n");
+ show_free_areas();
+
+ for_each_online_pgdat(pgdat) {
+ unsigned long i, flags;
+
+ pgdat_resize_lock(pgdat, &flags);
+ for (i = 0; i < pgdat->node_spanned_pages; i++) {
+ struct page *page;
+ unsigned long pfn = pgdat->node_start_pfn + i;
+
+ if (unlikely(!(i % MAX_ORDER_NR_PAGES)))
+ touch_nmi_watchdog();
+
+ if (!pfn_valid(pfn))
+ continue;
+
+ page = pfn_to_page(pfn);
+
+ if (PageHighMem(page))
+ highmem++;
+
+ if (PageReserved(page))
+ reserved++;
+ else if (page_count(page) == 1)
+ nonshared++;
+ else if (page_count(page) > 1)
+ shared += page_count(page) - 1;
+
+ total++;
+ }
+ pgdat_resize_unlock(pgdat, &flags);
+ }
+
+ printk(KERN_INFO "%lu pages RAM\n", total);
+#ifdef CONFIG_HIGHMEM
+ printk(KERN_INFO "%lu pages HighMem\n", highmem);
+#endif
+ printk(KERN_INFO "%lu pages reserved\n", reserved);
+ printk(KERN_INFO "%lu pages shared\n", shared);
+ printk(KERN_INFO "%lu pages non-shared\n", nonshared);
+#ifdef CONFIG_QUICKLIST
+ printk(KERN_INFO "%lu pages in pagetable cache\n",
+ quicklist_total_size());
+#endif
+}
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 6c90fb90e19c..0f8fc22ed103 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -7,11 +7,10 @@
#include <linux/kallsyms.h>
#include <linux/sched.h>
-unsigned int debug_smp_processor_id(void)
+notrace unsigned int debug_smp_processor_id(void)
{
unsigned long preempt_count = preempt_count();
int this_cpu = raw_smp_processor_id();
- cpumask_t this_mask;
if (likely(preempt_count))
goto out;
@@ -23,9 +22,7 @@ unsigned int debug_smp_processor_id(void)
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
- this_mask = cpumask_of_cpu(this_cpu);
-
- if (cpus_equal(current->cpus_allowed, this_mask))
+ if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
goto out;
/*
@@ -37,7 +34,7 @@ unsigned int debug_smp_processor_id(void)
/*
* Avoid recursion:
*/
- preempt_disable();
+ preempt_disable_notrace();
if (!printk_ratelimit())
goto out_enable;
@@ -49,7 +46,7 @@ unsigned int debug_smp_processor_id(void)
dump_stack();
out_enable:
- preempt_enable_no_resched();
+ preempt_enable_no_resched_notrace();
out:
return this_cpu;
}
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index d568894df8cc..977edbdbc1de 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -492,7 +492,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
*/
dma_addr_t handle;
handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
- if (swiotlb_dma_mapping_error(handle))
+ if (swiotlb_dma_mapping_error(hwdev, handle))
return NULL;
ret = bus_to_virt(handle);
@@ -824,7 +824,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
}
int
-swiotlb_dma_mapping_error(dma_addr_t dma_addr)
+swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
}
diff --git a/lib/syscall.c b/lib/syscall.c
new file mode 100644
index 000000000000..a4f7067f72fa
--- /dev/null
+++ b/lib/syscall.c
@@ -0,0 +1,75 @@
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <asm/syscall.h>
+
+static int collect_syscall(struct task_struct *target, long *callno,
+ unsigned long args[6], unsigned int maxargs,
+ unsigned long *sp, unsigned long *pc)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ if (unlikely(!regs))
+ return -EAGAIN;
+
+ *sp = user_stack_pointer(regs);
+ *pc = instruction_pointer(regs);
+
+ *callno = syscall_get_nr(target, regs);
+ if (*callno != -1L && maxargs > 0)
+ syscall_get_arguments(target, regs, 0, maxargs, args);
+
+ return 0;
+}
+
+/**
+ * task_current_syscall - Discover what a blocked task is doing.
+ * @target: thread to examine
+ * @callno: filled with system call number or -1
+ * @args: filled with @maxargs system call arguments
+ * @maxargs: number of elements in @args to fill
+ * @sp: filled with user stack pointer
+ * @pc: filled with user PC
+ *
+ * If @target is blocked in a system call, returns zero with *@callno
+ * set to the call's number and @args filled in with its arguments.
+ * Registers not used for system call arguments may not be available and
+ * it is not kosher to use &struct user_regset calls while the system
+ * call is still in progress. Note we may get this result if @target
+ * has finished its system call but not yet returned to user mode, such
+ * as when it's stopped for signal handling or syscall exit tracing.
+ *
+ * If @target is blocked in the kernel during a fault or exception,
+ * returns zero with *@callno set to -1 and does not fill in @args.
+ * If so, it's now safe to examine @target using &struct user_regset
+ * get() calls as long as we're sure @target won't return to user mode.
+ *
+ * Returns -%EAGAIN if @target does not remain blocked.
+ *
+ * Returns -%EINVAL if @maxargs is too large (maximum is six).
+ */
+int task_current_syscall(struct task_struct *target, long *callno,
+ unsigned long args[6], unsigned int maxargs,
+ unsigned long *sp, unsigned long *pc)
+{
+ long state;
+ unsigned long ncsw;
+
+ if (unlikely(maxargs > 6))
+ return -EINVAL;
+
+ if (target == current)
+ return collect_syscall(target, callno, args, maxargs, sp, pc);
+
+ state = target->state;
+ if (unlikely(!state))
+ return -EAGAIN;
+
+ ncsw = wait_task_inactive(target, state);
+ if (unlikely(!ncsw) ||
+ unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) ||
+ unlikely(wait_task_inactive(target, state) != ncsw))
+ return -EAGAIN;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(task_current_syscall);
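
A minimal sketch of a caller (the target task is hypothetical and must stay blocked, e.g. in a ptrace stop, for the snapshot to be meaningful):

    long nr;
    unsigned long args[6], sp, pc;

    if (task_current_syscall(task, &nr, args, 6, &sp, &pc) == 0 &&
        nr != -1L)
    	printk(KERN_DEBUG "task %d blocked in syscall %ld\n",
    	       task->pid, nr);
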
diff --git a/lib/textsearch.c b/lib/textsearch.c
index be8bda3862f5..9fbcb44c554f 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -54,10 +54,13 @@
* USAGE
*
* Before a search can be performed, a configuration must be created
- * by calling textsearch_prepare() specyfing the searching algorithm and
- * the pattern to look for. The returned configuration may then be used
- * for an arbitary amount of times and even in parallel as long as a
- * separate struct ts_state variable is provided to every instance.
+ * by calling textsearch_prepare(), specifying the search algorithm,
+ * the pattern to look for and the flags. Setting the TS_IGNORECASE
+ * flag enables case insensitive matching, but it may slow the
+ * algorithm down, so use it at your own risk.
+ * The returned configuration may then be used an arbitrary number
+ * of times, even in parallel, as long as a separate struct
+ * ts_state variable is provided to every instance.
*
* The actual search is performed by either calling textsearch_find_-
* continuous() for linear data or by providing an own get_next_block()
@@ -89,7 +92,6 @@
* panic("Oh my god, dancing chickens at %d\n", pos);
*
* textsearch_destroy(conf);
- *
* ==========================================================================
*/
@@ -97,6 +99,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/err.h>
#include <linux/textsearch.h>
@@ -264,7 +267,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
return ERR_PTR(-EINVAL);
ops = lookup_ts_algo(algo);
-#ifdef CONFIG_KMOD
+#ifdef CONFIG_MODULES
/*
* Why not always autoload you may ask. Some users are
* in a situation where requesting a module may deadlock,
@@ -279,7 +282,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
if (ops == NULL)
goto errout;
- conf = ops->init(pattern, len, gfp_mask);
+ conf = ops->init(pattern, len, gfp_mask, flags);
if (IS_ERR(conf)) {
err = PTR_ERR(conf);
goto errout;
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index d90822c378a4..9e66ee4020e9 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -39,6 +39,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/ctype.h>
#include <linux/textsearch.h>
/* Alphabet size, use ASCII */
@@ -63,7 +64,8 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
struct ts_bm *bm = ts_config_priv(conf);
unsigned int i, text_len, consumed = state->offset;
const u8 *text;
- int shift = bm->patlen, bs;
+ int shift = bm->patlen - 1, bs;
+ const u8 icase = conf->flags & TS_IGNORECASE;
for (;;) {
text_len = conf->get_next_block(consumed, &text, conf, state);
@@ -75,7 +77,9 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
DEBUGP("Searching in position %d (%c)\n",
shift, text[shift]);
for (i = 0; i < bm->patlen; i++)
- if (text[shift-i] != bm->pattern[bm->patlen-1-i])
+ if ((icase ? toupper(text[shift-i])
+ : text[shift-i])
+ != bm->pattern[bm->patlen-1-i])
goto next;
/* London calling... */
@@ -111,14 +115,18 @@ static int subpattern(u8 *pattern, int i, int j, int g)
return ret;
}
-static void compute_prefix_tbl(struct ts_bm *bm)
+static void compute_prefix_tbl(struct ts_bm *bm, int flags)
{
int i, j, g;
for (i = 0; i < ASIZE; i++)
bm->bad_shift[i] = bm->patlen;
- for (i = 0; i < bm->patlen - 1; i++)
+ for (i = 0; i < bm->patlen - 1; i++) {
bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
+ if (flags & TS_IGNORECASE)
+ bm->bad_shift[tolower(bm->pattern[i])]
+ = bm->patlen - 1 - i;
+ }
/* Compute the good shift array, used to match reocurrences
* of a subpattern */
@@ -135,10 +143,11 @@ static void compute_prefix_tbl(struct ts_bm *bm)
}
static struct ts_config *bm_init(const void *pattern, unsigned int len,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, int flags)
{
struct ts_config *conf;
struct ts_bm *bm;
+ int i;
unsigned int prefix_tbl_len = len * sizeof(unsigned int);
size_t priv_size = sizeof(*bm) + len + prefix_tbl_len;
@@ -146,11 +155,16 @@ static struct ts_config *bm_init(const void *pattern, unsigned int len,
if (IS_ERR(conf))
return conf;
+ conf->flags = flags;
bm = ts_config_priv(conf);
bm->patlen = len;
bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
- memcpy(bm->pattern, pattern, len);
- compute_prefix_tbl(bm);
+ if (flags & TS_IGNORECASE)
+ for (i = 0; i < len; i++)
+ bm->pattern[i] = toupper(((u8 *)pattern)[i]);
+ else
+ memcpy(bm->pattern, pattern, len);
+ compute_prefix_tbl(bm, flags);
return conf;
}
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index af575b61526b..5696a35184e4 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -257,7 +257,7 @@ found_match:
}
static struct ts_config *fsm_init(const void *pattern, unsigned int len,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, int flags)
{
int i, err = -EINVAL;
struct ts_config *conf;
@@ -269,6 +269,9 @@ static struct ts_config *fsm_init(const void *pattern, unsigned int len,
if (len % sizeof(struct ts_fsm_token) || ntokens < 1)
goto errout;
+ if (flags & TS_IGNORECASE)
+ goto errout;
+
for (i = 0; i < ntokens; i++) {
struct ts_fsm_token *t = &tokens[i];
@@ -284,6 +287,7 @@ static struct ts_config *fsm_init(const void *pattern, unsigned int len,
if (IS_ERR(conf))
return conf;
+ conf->flags = flags;
fsm = ts_config_priv(conf);
fsm->ntokens = ntokens;
memcpy(fsm->tokens, pattern, len);
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 3ced628cab4b..632f783e65f1 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/ctype.h>
#include <linux/textsearch.h>
struct ts_kmp
@@ -47,6 +48,7 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
struct ts_kmp *kmp = ts_config_priv(conf);
unsigned int i, q = 0, text_len, consumed = state->offset;
const u8 *text;
+ const int icase = conf->flags & TS_IGNORECASE;
for (;;) {
text_len = conf->get_next_block(consumed, &text, conf, state);
@@ -55,9 +57,11 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
break;
for (i = 0; i < text_len; i++) {
- while (q > 0 && kmp->pattern[q] != text[i])
+ while (q > 0 && kmp->pattern[q]
+ != (icase ? toupper(text[i]) : text[i]))
q = kmp->prefix_tbl[q - 1];
- if (kmp->pattern[q] == text[i])
+ if (kmp->pattern[q]
+ == (icase ? toupper(text[i]) : text[i]))
q++;
if (unlikely(q == kmp->pattern_len)) {
state->offset = consumed + i + 1;
@@ -72,24 +76,28 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
}
static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len,
- unsigned int *prefix_tbl)
+ unsigned int *prefix_tbl, int flags)
{
unsigned int k, q;
+ const u8 icase = flags & TS_IGNORECASE;
for (k = 0, q = 1; q < len; q++) {
- while (k > 0 && pattern[k] != pattern[q])
+ while (k > 0 && (icase ? toupper(pattern[k]) : pattern[k])
+ != (icase ? toupper(pattern[q]) : pattern[q]))
k = prefix_tbl[k-1];
- if (pattern[k] == pattern[q])
+ if ((icase ? toupper(pattern[k]) : pattern[k])
+ == (icase ? toupper(pattern[q]) : pattern[q]))
k++;
prefix_tbl[q] = k;
}
}
static struct ts_config *kmp_init(const void *pattern, unsigned int len,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, int flags)
{
struct ts_config *conf;
struct ts_kmp *kmp;
+ int i;
unsigned int prefix_tbl_len = len * sizeof(unsigned int);
size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len;
@@ -97,11 +105,16 @@ static struct ts_config *kmp_init(const void *pattern, unsigned int len,
if (IS_ERR(conf))
return conf;
+ conf->flags = flags;
kmp = ts_config_priv(conf);
kmp->pattern_len = len;
- compute_prefix_tbl(pattern, len, kmp->prefix_tbl);
+ compute_prefix_tbl(pattern, len, kmp->prefix_tbl, flags);
kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len;
- memcpy(kmp->pattern, pattern, len);
+ if (flags & TS_IGNORECASE)
+ for (i = 0; i < len; i++)
+ kmp->pattern[i] = toupper(((u8 *)pattern)[i]);
+ else
+ memcpy(kmp->pattern, pattern, len);
return conf;
}
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 6021757a4496..1dc2d1d18fa8 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -22,6 +22,8 @@
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
#include <asm/page.h> /* for PAGE_SIZE */
#include <asm/div64.h>
@@ -482,6 +484,89 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
return buf;
}
+static char *string(char *buf, char *end, char *s, int field_width, int precision, int flags)
+{
+ int len, i;
+
+ if ((unsigned long)s < PAGE_SIZE)
+ s = "<NULL>";
+
+ len = strnlen(s, precision);
+
+ if (!(flags & LEFT)) {
+ while (len < field_width--) {
+ if (buf < end)
+ *buf = ' ';
+ ++buf;
+ }
+ }
+ for (i = 0; i < len; ++i) {
+ if (buf < end)
+ *buf = *s;
+ ++buf; ++s;
+ }
+ while (len < field_width--) {
+ if (buf < end)
+ *buf = ' ';
+ ++buf;
+ }
+ return buf;
+}
+
+static inline void *dereference_function_descriptor(void *ptr)
+{
+#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
+ void *p;
+ if (!probe_kernel_address(ptr, p))
+ ptr = p;
+#endif
+ return ptr;
+}
+
+static char *symbol_string(char *buf, char *end, void *ptr, int field_width, int precision, int flags)
+{
+ unsigned long value = (unsigned long) ptr;
+#ifdef CONFIG_KALLSYMS
+ char sym[KSYM_SYMBOL_LEN];
+ sprint_symbol(sym, value);
+ return string(buf, end, sym, field_width, precision, flags);
+#else
+ field_width = 2*sizeof(void *);
+ flags |= SPECIAL | SMALL | ZEROPAD;
+ return number(buf, end, value, 16, field_width, precision, flags);
+#endif
+}
+
+/*
+ * Show a '%p' thing. A kernel extension is that the '%p' is followed
+ * by an extra set of alphanumeric characters that are extended format
+ * specifiers.
+ *
+ * Right now we just handle 'F' (for symbolic Function descriptor pointers)
+ * and 'S' (for Symbolic direct pointers), but this can easily be
+ * extended in the future (network address types etc).
+ *
+ * The difference between 'S' and 'F' is that on ia64 and ppc64 function
+ * pointers are really function descriptors, which contain a pointer to
+ * the real address.
+ */
+static char *pointer(const char *fmt, char *buf, char *end, void *ptr, int field_width, int precision, int flags)
+{
+ switch (*fmt) {
+ case 'F':
+ ptr = dereference_function_descriptor(ptr);
+ /* Fallthrough */
+ case 'S':
+ return symbol_string(buf, end, ptr, field_width, precision, flags);
+ }
+ flags |= SMALL;
+ if (field_width == -1) {
+ field_width = 2*sizeof(void *);
+ flags |= ZEROPAD;
+ }
+ return number(buf, end, (unsigned long) ptr, 16, field_width, precision, flags);
+}
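
With the extension in place, callers can print resolved symbol names directly; a minimal sketch (the function pointer argument is hypothetical):

    /* 'F' dereferences a function descriptor first (ia64/ppc64),
     * 'S' resolves a plain text address to its symbol. */
    printk(KERN_INFO "handler %pF called from %pS\n",
           dev->handler, __builtin_return_address(0));
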
+
/**
* vsnprintf - Format a string and place it in a buffer
* @buf: The buffer to place the result into
@@ -502,11 +587,9 @@ static char *number(char *buf, char *end, unsigned long long num, int base, int
*/
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
- int len;
unsigned long long num;
- int i, base;
+ int base;
char *str, *end, c;
- const char *s;
int flags; /* flags to number() */
@@ -622,43 +705,18 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
continue;
case 's':
- s = va_arg(args, char *);
- if ((unsigned long)s < PAGE_SIZE)
- s = "<NULL>";
-
- len = strnlen(s, precision);
-
- if (!(flags & LEFT)) {
- while (len < field_width--) {
- if (str < end)
- *str = ' ';
- ++str;
- }
- }
- for (i = 0; i < len; ++i) {
- if (str < end)
- *str = *s;
- ++str; ++s;
- }
- while (len < field_width--) {
- if (str < end)
- *str = ' ';
- ++str;
- }
+ str = string(str, end, va_arg(args, char *), field_width, precision, flags);
continue;
case 'p':
- flags |= SMALL;
- if (field_width == -1) {
- field_width = 2*sizeof(void *);
- flags |= ZEROPAD;
- }
- str = number(str, end,
- (unsigned long) va_arg(args, void *),
- 16, field_width, precision, flags);
+ str = pointer(fmt+1, str, end,
+ va_arg(args, void *),
+ field_width, precision, flags);
+ /* Skip all alphanumeric pointer suffixes */
+ while (isalnum(fmt[1]))
+ fmt++;
continue;
-
case 'n':
/* FIXME:
* What does C99 say about the overflow case here? */