Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug                 43
-rw-r--r--  lib/Makefile                       8
-rw-r--r--  lib/bitmap.c                      54
-rw-r--r--  lib/errno.c                        7
-rw-r--r--  lib/genalloc.c                    63
-rw-r--r--  lib/ioremap.c                     92
-rw-r--r--  lib/irq_regs.c                    17
-rw-r--r--  lib/kobject.c                      1
-rw-r--r--  lib/list_debug.c                  76
-rw-r--r--  lib/radix-tree.c                   6
-rw-r--r--  lib/random32.c                   142
-rw-r--r--  lib/rbtree.c                       6
-rw-r--r--  lib/reed_solomon/reed_solomon.c    2
-rw-r--r--  lib/rwsem-spinlock.c               2
-rw-r--r--  lib/rwsem.c                        4
-rw-r--r--  lib/sort.c                        10
-rw-r--r--  lib/spinlock_debug.c              19
-rw-r--r--  lib/ts_fsm.c                      10
18 files changed, 498 insertions(+), 64 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index f1ac3184dc08..77491e311791 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -71,7 +71,7 @@ config LOG_BUF_SHIFT
config DETECT_SOFTLOCKUP
bool "Detect Soft Lockups"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && !S390
default y
help
Say Y here to enable the kernel to detect "soft lockups",
@@ -284,7 +284,7 @@ config DEBUG_HIGHMEM
config DEBUG_BUGVERBOSE
bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
depends on BUG
- depends on ARM || ARM26 || AVR32 || M32R || M68K || SPARC32 || SPARC64 || X86_32 || FRV
+ depends on ARM || ARM26 || AVR32 || M32R || M68K || SPARC32 || SPARC64 || X86_32 || FRV || SUPERH
default !EMBEDDED
help
Say Y here to make BUG() panics output the file name and line number
@@ -320,9 +320,18 @@ config DEBUG_VM
If unsure, say N.
+config DEBUG_LIST
+ bool "Debug linked list manipulation"
+ depends on DEBUG_KERNEL
+ help
+ Enable this to turn on extended checks in the linked-list
+ walking routines.
+
+ If unsure, say N.
+
config FRAME_POINTER
bool "Compile the kernel with frame pointers"
- depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || AVR32)
+ depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML || S390 || AVR32 || SUPERH)
default y if DEBUG_INFO && UML
help
If you say Y here the resulting kernel image will be slightly larger
@@ -362,6 +371,20 @@ config FORCED_INLINING
become the default in the future, until then this option is there to
test gcc for this.
+config HEADERS_CHECK
+ bool "Run 'make headers_check' when building vmlinux"
+ depends on !UML
+ help
+ This option will extract the user-visible kernel headers whenever
+ building the kernel, and will run basic sanity checks on them to
+ ensure that exported files do not attempt to include files which
+ were not exported, etc.
+
+ If you're making modifications to header files which are
+ relevant for userspace, say 'Y', and check the headers
+ exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
+ your build tree), to make sure they're suitable.
+
config RCU_TORTURE_TEST
tristate "torture tests for RCU"
depends on DEBUG_KERNEL
@@ -375,3 +398,17 @@ config RCU_TORTURE_TEST
at boot time (you probably don't).
Say M if you want the RCU torture tests to build as a module.
Say N if you are unsure.
+
+config LKDTM
+ tristate "Linux Kernel Dump Test Tool Module"
+ depends on KPROBES
+ default n
+ help
+ This module enables testing of the different dumping mechanisms by
+ inducing system failures at predefined crash points.
+ If you don't need it: say N
+ Choose M here to compile this code as a module. The module will be
+ called lkdtm.
+
+ Documentation on how to use the module can be found in
+ drivers/misc/lkdtm.c
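For reference, a sketch of a .config fragment exercising the three options added above (the option names come straight from the patch; the dependencies noted in the comments come from its 'depends on' lines):

# DEBUG_LIST requires CONFIG_DEBUG_KERNEL
CONFIG_DEBUG_LIST=y
# HEADERS_CHECK runs 'make headers_check' on every vmlinux build (!UML only)
CONFIG_HEADERS_CHECK=y
# LKDTM requires CONFIG_KPROBES; build as a module so it loads on demand
CONFIG_LKDTM=m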
diff --git a/lib/Makefile b/lib/Makefile
index ef1d37afbbb6..cf98fabaa549 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -2,16 +2,17 @@
# Makefile for some libs needed in the kernel.
#
-lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
+lib-y := ctype.o string.o vsprintf.o cmdline.o \
bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
- sha1.o
+ sha1.o irq_regs.o
+lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o kref.o kobject_uevent.o klist.o
-obj-y += sort.o parser.o halfmd4.o iomap_copy.o debug_locks.o
+obj-y += sort.o parser.o halfmd4.o iomap_copy.o debug_locks.o random32.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -28,6 +29,7 @@ lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
obj-$(CONFIG_PLIST) += plist.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
+obj-$(CONFIG_DEBUG_LIST) += list_debug.o
ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
lib-y += dec_and_lock.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index d71e38c54ea5..037fa9aa2ed7 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -316,10 +316,11 @@ int bitmap_scnprintf(char *buf, unsigned int buflen,
EXPORT_SYMBOL(bitmap_scnprintf);
/**
- * bitmap_parse - convert an ASCII hex string into a bitmap.
- * @ubuf: pointer to buffer in user space containing string.
- * @ubuflen: buffer size in bytes. If string is smaller than this
+ * __bitmap_parse - convert an ASCII hex string into a bitmap.
+ * @buf: pointer to buffer containing string.
+ * @buflen: buffer size in bytes. If string is smaller than this
* then it must be terminated with a \0.
+ * @is_user: location of buffer, 0 indicates kernel space
* @maskp: pointer to bitmap array that will contain result.
* @nmaskbits: size of bitmap, in bits.
*
@@ -330,11 +331,13 @@ EXPORT_SYMBOL(bitmap_scnprintf);
* characters and for grouping errors such as "1,,5", ",44", "," and "".
* Leading and trailing whitespace accepted, but not embedded whitespace.
*/
-int bitmap_parse(const char __user *ubuf, unsigned int ubuflen,
- unsigned long *maskp, int nmaskbits)
+int __bitmap_parse(const char *buf, unsigned int buflen,
+ int is_user, unsigned long *maskp,
+ int nmaskbits)
{
int c, old_c, totaldigits, ndigits, nchunks, nbits;
u32 chunk;
+ const char __user *ubuf = buf;
bitmap_zero(maskp, nmaskbits);
@@ -343,11 +346,15 @@ int bitmap_parse(const char __user *ubuf, unsigned int ubuflen,
chunk = ndigits = 0;
/* Get the next chunk of the bitmap */
- while (ubuflen) {
+ while (buflen) {
old_c = c;
- if (get_user(c, ubuf++))
- return -EFAULT;
- ubuflen--;
+ if (is_user) {
+ if (__get_user(c, ubuf++))
+ return -EFAULT;
+ }
+ else
+ c = *buf++;
+ buflen--;
if (isspace(c))
continue;
@@ -388,11 +395,36 @@ int bitmap_parse(const char __user *ubuf, unsigned int ubuflen,
nbits += (nchunks == 1) ? nbits_to_hold_value(chunk) : CHUNKSZ;
if (nbits > nmaskbits)
return -EOVERFLOW;
- } while (ubuflen && c == ',');
+ } while (buflen && c == ',');
return 0;
}
-EXPORT_SYMBOL(bitmap_parse);
+EXPORT_SYMBOL(__bitmap_parse);
+
+/**
+ * bitmap_parse_user()
+ *
+ * @ubuf: pointer to user buffer containing string.
+ * @ulen: buffer size in bytes. If string is smaller than this
+ * then it must be terminated with a \0.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ *
+ * Wrapper for __bitmap_parse(), providing it with user buffer.
+ *
+ * We cannot have this as an inline function in bitmap.h because it needs
+ * linux/uaccess.h to get the access_ok() declaration and this causes
+ * cyclic dependencies.
+ */
+int bitmap_parse_user(const char __user *ubuf,
+ unsigned int ulen, unsigned long *maskp,
+ int nmaskbits)
+{
+ if (!access_ok(VERIFY_READ, ubuf, ulen))
+ return -EFAULT;
+ return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
+}
+EXPORT_SYMBOL(bitmap_parse_user);
/*
* bscnl_emit(buf, buflen, rbot, rtop, bp)
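A minimal usage sketch of the refactored parser (not part of the patch, and assuming the matching declarations land in linux/bitmap.h): kernel-space callers pass is_user = 0 with a plain pointer, while user buffers keep going through bitmap_parse_user(). The mask text is comma-separated 32-bit hex chunks, most significant chunk first:

#include <linux/bitmap.h>

static int parse_example(void)
{
	DECLARE_BITMAP(mask, 64);

	/* sets bits 32..39 (the "ff" chunk) and bits 0..1 (the "3" chunk) */
	return __bitmap_parse("ff,00000003", 11, 0, mask, 64);
}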
diff --git a/lib/errno.c b/lib/errno.c
deleted file mode 100644
index 41cb9d76c052..000000000000
--- a/lib/errno.c
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * linux/lib/errno.c
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- */
-
-int errno;
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 71338b48e889..75ae68ce03e1 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -14,11 +14,13 @@
#include <linux/genalloc.h>
-/*
- * Create a new special memory pool.
- *
+/**
+ * gen_pool_create - create a new special memory pool
* @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
* @nid: node id of the node the pool structure should be allocated on, or -1
+ *
+ * Create a new special memory pool that can be used to manage special purpose
+ * memory not managed by the regular kmalloc/kfree interface.
*/
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
@@ -34,15 +36,15 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
}
EXPORT_SYMBOL(gen_pool_create);
-
-/*
- * Add a new chunk of memory to the specified pool.
- *
+/**
+ * gen_pool_add - add a new chunk of special memory to the pool
* @pool: pool to add new memory chunk to
* @addr: starting address of memory chunk to add to pool
* @size: size in bytes of the memory chunk to add to pool
* @nid: node id of the node the chunk structure and bitmap should be
* allocated on, or -1
+ *
+ * Add a new chunk of special memory to the specified pool.
*/
int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
int nid)
@@ -69,13 +71,44 @@ int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
}
EXPORT_SYMBOL(gen_pool_add);
-
-/*
- * Allocate the requested number of bytes from the specified pool.
- * Uses a first-fit algorithm.
+/**
+ * gen_pool_destroy - destroy a special memory pool
+ * @pool: pool to destroy
*
+ * Destroy the specified special memory pool. Verifies that there are no
+ * outstanding allocations.
+ */
+void gen_pool_destroy(struct gen_pool *pool)
+{
+ struct list_head *_chunk, *_next_chunk;
+ struct gen_pool_chunk *chunk;
+ int order = pool->min_alloc_order;
+ int bit, end_bit;
+
+
+ write_lock(&pool->lock);
+ list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
+ chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
+ list_del(&chunk->next_chunk);
+
+ end_bit = (chunk->end_addr - chunk->start_addr) >> order;
+ bit = find_next_bit(chunk->bits, end_bit, 0);
+ BUG_ON(bit < end_bit);
+
+ kfree(chunk);
+ }
+ kfree(pool);
+ return;
+}
+EXPORT_SYMBOL(gen_pool_destroy);
+
+/**
+ * gen_pool_alloc - allocate special memory from the pool
* @pool: pool to allocate from
* @size: number of bytes to allocate from the pool
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses a first-fit algorithm.
*/
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
@@ -127,13 +160,13 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
}
EXPORT_SYMBOL(gen_pool_alloc);
-
-/*
- * Free the specified memory back to the specified pool.
- *
+/**
+ * gen_pool_free - free allocated special memory back to the pool
* @pool: pool to free to
* @addr: starting address of memory to free back to pool
* @size: size in bytes of memory to free
+ *
+ * Free previously allocated special memory back to the specified pool.
*/
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
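A hedged usage sketch of the API documented above (not from the patch; sram_base stands for a hypothetical, already-mapped region of special memory):

#include <linux/errno.h>
#include <linux/genalloc.h>

static unsigned long sram_base;			/* hypothetical special memory */

static int pool_example(void)
{
	struct gen_pool *pool;
	unsigned long obj;

	pool = gen_pool_create(4, -1);		/* 16-byte minimum allocation */
	if (!pool)
		return -ENOMEM;
	if (gen_pool_add(pool, sram_base, 4096, -1)) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}
	obj = gen_pool_alloc(pool, 128);	/* first-fit; returns 0 on failure */
	if (obj)
		gen_pool_free(pool, obj, 128);
	gen_pool_destroy(pool);			/* BUGs if allocations remain */
	return 0;
}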
diff --git a/lib/ioremap.c b/lib/ioremap.c
new file mode 100644
index 000000000000..99fa277f9f7b
--- /dev/null
+++ b/lib/ioremap.c
@@ -0,0 +1,92 @@
+/*
+ * Re-map IO memory to kernel address space so that we can access it.
+ * This is needed for high PCI addresses that aren't mapped in the
+ * 640k-1MB IO memory area on PC's
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ */
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+
+static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
+ unsigned long end, unsigned long phys_addr, pgprot_t prot)
+{
+ pte_t *pte;
+ unsigned long pfn;
+
+ pfn = phys_addr >> PAGE_SHIFT;
+ pte = pte_alloc_kernel(pmd, addr);
+ if (!pte)
+ return -ENOMEM;
+ do {
+ BUG_ON(!pte_none(*pte));
+ set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
+ pfn++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ return 0;
+}
+
+static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
+ unsigned long end, unsigned long phys_addr, pgprot_t prot)
+{
+ pmd_t *pmd;
+ unsigned long next;
+
+ phys_addr -= addr;
+ pmd = pmd_alloc(&init_mm, pud, addr);
+ if (!pmd)
+ return -ENOMEM;
+ do {
+ next = pmd_addr_end(addr, end);
+ if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
+ return -ENOMEM;
+ } while (pmd++, addr = next, addr != end);
+ return 0;
+}
+
+static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
+ unsigned long end, unsigned long phys_addr, pgprot_t prot)
+{
+ pud_t *pud;
+ unsigned long next;
+
+ phys_addr -= addr;
+ pud = pud_alloc(&init_mm, pgd, addr);
+ if (!pud)
+ return -ENOMEM;
+ do {
+ next = pud_addr_end(addr, end);
+ if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
+ return -ENOMEM;
+ } while (pud++, addr = next, addr != end);
+ return 0;
+}
+
+int ioremap_page_range(unsigned long addr,
+ unsigned long end, unsigned long phys_addr, pgprot_t prot)
+{
+ pgd_t *pgd;
+ unsigned long start;
+ unsigned long next;
+ int err;
+
+ BUG_ON(addr >= end);
+
+ start = addr;
+ phys_addr -= addr;
+ pgd = pgd_offset_k(addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot);
+ if (err)
+ break;
+ } while (pgd++, addr = next, addr != end);
+
+ flush_cache_vmap(start, end);
+
+ return err;
+}
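A sketch of how an architecture's ioremap() might drive the new generic walker; example_ioremap() is hypothetical, and the failure cleanup (vunmap of the freshly reserved area) follows the pattern several arches use, so treat it as an assumption rather than the canonical sequence:

#include <linux/vmalloc.h>
#include <linux/io.h>

static void __iomem *example_ioremap(unsigned long phys_addr, size_t size,
				     pgprot_t prot)
{
	struct vm_struct *area;
	unsigned long vaddr;

	area = get_vm_area(size, VM_IOREMAP);	/* reserve kernel VA space */
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		vunmap(area->addr);	/* drop the area and partial mappings */
		return NULL;
	}
	return (void __iomem *)vaddr;
}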
diff --git a/lib/irq_regs.c b/lib/irq_regs.c
new file mode 100644
index 000000000000..753880a5440c
--- /dev/null
+++ b/lib/irq_regs.c
@@ -0,0 +1,17 @@
+/* saved per-CPU IRQ register pointer
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <asm/irq_regs.h>
+
+#ifndef ARCH_HAS_OWN_IRQ_REGS
+DEFINE_PER_CPU(struct pt_regs *, __irq_regs);
+EXPORT_PER_CPU_SYMBOL(__irq_regs);
+#endif
diff --git a/lib/kobject.c b/lib/kobject.c
index 1699eb9161f3..7dd5c0e9d996 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -119,6 +119,7 @@ char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
return path;
}
+EXPORT_SYMBOL_GPL(kobject_get_path);
/**
* kobject_init - initialize object.
diff --git a/lib/list_debug.c b/lib/list_debug.c
new file mode 100644
index 000000000000..7ba9d823d388
--- /dev/null
+++ b/lib/list_debug.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2006, Red Hat, Inc., Dave Jones
+ * Released under the General Public License (GPL).
+ *
+ * This file contains the linked list implementations for
+ * DEBUG_LIST.
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the prev/next entries already!
+ */
+
+void __list_add(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ if (unlikely(next->prev != prev)) {
+ printk(KERN_ERR "list_add corruption. next->prev should be %p, but was %p\n",
+ prev, next->prev);
+ BUG();
+ }
+ if (unlikely(prev->next != next)) {
+ printk(KERN_ERR "list_add corruption. prev->next should be %p, but was %p\n",
+ next, prev->next);
+ BUG();
+ }
+ next->prev = new;
+ new->next = next;
+ new->prev = prev;
+ prev->next = new;
+}
+EXPORT_SYMBOL(__list_add);
+
+/**
+ * list_add - add a new entry
+ * @new: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+void list_add(struct list_head *new, struct list_head *head)
+{
+ __list_add(new, head, head->next);
+}
+EXPORT_SYMBOL(list_add);
+
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
+void list_del(struct list_head *entry)
+{
+ if (unlikely(entry->prev->next != entry)) {
+ printk(KERN_ERR "list_del corruption. prev->next should be %p, "
+ "but was %p\n", entry, entry->prev->next);
+ BUG();
+ }
+ if (unlikely(entry->next->prev != entry)) {
+ printk(KERN_ERR "list_del corruption. next->prev should be %p, "
+ "but was %p\n", entry, entry->next->prev);
+ BUG();
+ }
+ __list_del(entry->prev, entry->next);
+ entry->next = LIST_POISON1;
+ entry->prev = LIST_POISON2;
+}
+EXPORT_SYMBOL(list_del);
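The class of bug these checks make loud, as a minimal sketch (the memset stands in for a use-after-free clobbering a live node):

#include <linux/list.h>
#include <linux/string.h>

static void corruption_example(void)
{
	LIST_HEAD(q);
	struct list_head a, b;

	list_add(&a, &q);
	memset(&a, 0x6b, sizeof(a));	/* simulate use-after-free damage */
	list_add(&b, &q);		/* next->prev != prev: printk + BUG() */
}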
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 637d55608de5..aa9bfd0bdbd1 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -160,13 +160,13 @@ static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
{
- root->gfp_mask |= (1 << (tag + __GFP_BITS_SHIFT));
+ root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
}
static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
{
- root->gfp_mask &= ~(1 << (tag + __GFP_BITS_SHIFT));
+ root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
}
static inline void root_tag_clear_all(struct radix_tree_root *root)
@@ -176,7 +176,7 @@ static inline void root_tag_clear_all(struct radix_tree_root *root)
static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
{
- return root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
+ return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
}
/*
diff --git a/lib/random32.c b/lib/random32.c
new file mode 100644
index 000000000000..4a15ce51cea7
--- /dev/null
+++ b/lib/random32.c
@@ -0,0 +1,142 @@
+/*
+ This is a maximally equidistributed combined Tausworthe generator
+ based on code from GNU Scientific Library 1.5 (30 Jun 2004)
+
+ x_n = (s1_n ^ s2_n ^ s3_n)
+
+ s1_{n+1} = (((s1_n & 4294967294) <<12) ^ (((s1_n <<13) ^ s1_n) >>19))
+ s2_{n+1} = (((s2_n & 4294967288) << 4) ^ (((s2_n << 2) ^ s2_n) >>25))
+ s3_{n+1} = (((s3_n & 4294967280) <<17) ^ (((s3_n << 3) ^ s3_n) >>11))
+
+ The period of this generator is about 2^88.
+
+ From: P. L'Ecuyer, "Maximally Equidistributed Combined Tausworthe
+ Generators", Mathematics of Computation, 65, 213 (1996), 203--213.
+
+ This is available on the net from L'Ecuyer's home page,
+
+ http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps
+ ftp://ftp.iro.umontreal.ca/pub/simulation/lecuyer/papers/tausme.ps
+
+ There is an erratum in the paper "Tables of Maximally
+ Equidistributed Combined LFSR Generators", Mathematics of
+ Computation, 68, 225 (1999), 261--269:
+ http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme2.ps
+
+ ... the k_j most significant bits of z_j must be non-
+ zero, for each j. (Note: this restriction also applies to the
+ computer code given in [4], but was mistakenly not mentioned in
+ that paper.)
+
+ This affects the seeding procedure by imposing the requirement
+ s1 > 1, s2 > 7, s3 > 15.
+
+*/
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <linux/module.h>
+#include <linux/random.h>
+
+struct rnd_state {
+ u32 s1, s2, s3;
+};
+
+static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
+
+static u32 __random32(struct rnd_state *state)
+{
+#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
+
+ state->s1 = TAUSWORTHE(state->s1, 13, 19, 4294967294UL, 12);
+ state->s2 = TAUSWORTHE(state->s2, 2, 25, 4294967288UL, 4);
+ state->s3 = TAUSWORTHE(state->s3, 3, 11, 4294967280UL, 17);
+
+ return (state->s1 ^ state->s2 ^ state->s3);
+}
+
+static void __set_random32(struct rnd_state *state, unsigned long s)
+{
+ if (s == 0)
+ s = 1; /* default seed is 1 */
+
+#define LCG(n) (69069 * n)
+ state->s1 = LCG(s);
+ state->s2 = LCG(state->s1);
+ state->s3 = LCG(state->s2);
+
+ /* "warm it up" */
+ __random32(state);
+ __random32(state);
+ __random32(state);
+ __random32(state);
+ __random32(state);
+ __random32(state);
+}
+
+/**
+ * random32 - pseudo random number generator
+ *
+ * A 32 bit pseudo-random number is generated using a fast
+ * algorithm suitable for simulation. This algorithm is NOT
+ * considered safe for cryptographic use.
+ */
+u32 random32(void)
+{
+ unsigned long r;
+ struct rnd_state *state = &get_cpu_var(net_rand_state);
+ r = __random32(state);
+ put_cpu_var(state);
+ return r;
+}
+EXPORT_SYMBOL(random32);
+
+/**
+ * srandom32 - add entropy to pseudo random number generator
+ * @entropy: entropy value
+ *
+ * Add some additional seeding to the random32() pool.
+ * Note: the pool is per-CPU, so seeding only affects the current CPU.
+ */
+void srandom32(u32 entropy)
+{
+ struct rnd_state *state = &get_cpu_var(net_rand_state);
+ __set_random32(state, state->s1 ^ entropy);
+ put_cpu_var(state);
+}
+EXPORT_SYMBOL(srandom32);
+
+/*
+ * Generate some initially weak seeding values so that
+ * the random32() engine can start.
+ */
+static int __init random32_init(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct rnd_state *state = &per_cpu(net_rand_state,i);
+ __set_random32(state, i + jiffies);
+ }
+ return 0;
+}
+core_initcall(random32_init);
+
+/*
+ * Generate better values after the random number generator
+ * is fully initialized.
+ */
+static int __init random32_reseed(void)
+{
+ int i;
+ unsigned long seed;
+
+ for_each_possible_cpu(i) {
+ struct rnd_state *state = &per_cpu(net_rand_state,i);
+
+ get_random_bytes(&seed, sizeof(seed));
+ __set_random32(state, seed);
+ }
+ return 0;
+}
+late_initcall(random32_reseed);
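Typical use, per the kernel-doc above, is cheap non-cryptographic randomness; a sketch (assuming the random32()/srandom32() declarations are exported via linux/random.h, as the EXPORT_SYMBOLs suggest):

#include <linux/random.h>
#include <linux/jiffies.h>

static unsigned long jittered_timeout(unsigned long base)
{
	/* fast PRNG: fine for jitter, never for anything security-relevant */
	return base + random32() % HZ;
}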
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 1e55ba1c2edf..48499c2d88cc 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -322,6 +322,9 @@ struct rb_node *rb_next(struct rb_node *node)
{
struct rb_node *parent;
+ if (rb_parent(node) == node)
+ return NULL;
+
/* If we have a right-hand child, go down and then left as far
as we can. */
if (node->rb_right) {
@@ -348,6 +351,9 @@ struct rb_node *rb_prev(struct rb_node *node)
{
struct rb_node *parent;
+ if (rb_parent(node) == node)
+ return NULL;
+
/* If we have a left-hand child, go down and then right as far
as we can. */
if (node->rb_left) {
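The added guard keys off the parent-points-to-self convention that marks a node as detached (as set up by RB_CLEAR_NODE()), so the iterators now return NULL instead of walking garbage. For reference, the usual in-order walk these helpers support (struct my_item and its consumer are hypothetical):

#include <linux/rbtree.h>

struct my_item {
	struct rb_node node;
};

static void consume(struct my_item *item) { /* hypothetical consumer */ }

static void walk(struct rb_root *root)
{
	struct rb_node *n;

	for (n = rb_first(root); n; n = rb_next(n))
		consume(rb_entry(n, struct my_item, node));
}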
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index 2cc11faa4ff1..a4b730a2180c 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -1,5 +1,5 @@
/*
- * lib/reed_solomon/rslib.c
+ * lib/reed_solomon/reed_solomon.c
*
* Overview:
* Generic Reed Solomon encoder / decoder library
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index db4fed74b940..c4cfd6c0342f 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -28,7 +28,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
* Make sure we are not reinitializing a held semaphore:
*/
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
- lockdep_init_map(&sem->dep_map, name, key);
+ lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
sem->activity = 0;
spin_lock_init(&sem->wait_lock);
diff --git a/lib/rwsem.c b/lib/rwsem.c
index b322421c2969..cdb4e3d05607 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -19,7 +19,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
* Make sure we are not reinitializing a held semaphore:
*/
debug_check_no_locks_freed((void *)sem, sizeof(*sem));
- lockdep_init_map(&sem->dep_map, name, key);
+ lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock);
@@ -146,7 +146,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
/*
* wait for a lock to be granted
*/
-static inline struct rw_semaphore *
+static struct rw_semaphore *
rwsem_down_failed_common(struct rw_semaphore *sem,
struct rwsem_waiter *waiter, signed long adjustment)
{
diff --git a/lib/sort.c b/lib/sort.c
index 5f3b51ffa1dc..488788b341cb 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -49,15 +49,15 @@ void sort(void *base, size_t num, size_t size,
void (*swap)(void *, void *, int size))
{
/* pre-scale counters for performance */
- int i = (num/2) * size, n = num * size, c, r;
+ int i = (num/2 - 1) * size, n = num * size, c, r;
if (!swap)
swap = (size == 4 ? u32_swap : generic_swap);
/* heapify */
for ( ; i >= 0; i -= size) {
- for (r = i; r * 2 < n; r = c) {
- c = r * 2;
+ for (r = i; r * 2 + size < n; r = c) {
+ c = r * 2 + size;
if (c < n - size && cmp(base + c, base + c + size) < 0)
c += size;
if (cmp(base + r, base + c) >= 0)
@@ -69,8 +69,8 @@ void sort(void *base, size_t num, size_t size,
/* sort */
for (i = n - size; i >= 0; i -= size) {
swap(base, base + i, size);
- for (r = 0; r * 2 < i; r = c) {
- c = r * 2;
+ for (r = 0; r * 2 + size < i; r = c) {
+ c = r * 2 + size;
if (c < i - size && cmp(base + c, base + c + size) < 0)
c += size;
if (cmp(base + r, base + c) >= 0)
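The fix converts the heap arithmetic from 1-based to 0-based indexing: with pre-scaled byte offsets, element i sits at offset r = i*size, its first child at 2*r + size (i.e. index 2*i + 1), and the last parent at (num/2 - 1)*size, which is exactly what the new initializer computes. A minimal caller, for reference:

#include <linux/sort.h>
#include <linux/kernel.h>

static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

static void sort_example(void)
{
	int v[] = { 3, 1, 2 };

	/* NULL swap selects the built-in 4-byte swap for int elements */
	sort(v, ARRAY_SIZE(v), sizeof(v[0]), cmp_int, NULL);	/* 1, 2, 3 */
}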
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 58c577dd82e5..b6c4f898197c 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -20,7 +20,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
- lockdep_init_map(&lock->dep_map, name, key);
+ lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
lock->magic = SPINLOCK_MAGIC;
@@ -38,7 +38,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
- lockdep_init_map(&lock->dep_map, name, key);
+ lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
lock->magic = RWLOCK_MAGIC;
@@ -99,11 +99,12 @@ static inline void debug_spin_unlock(spinlock_t *lock)
static void __spin_lock_debug(spinlock_t *lock)
{
- int print_once = 1;
u64 i;
+ u64 loops = loops_per_jiffy * HZ;
+ int print_once = 1;
for (;;) {
- for (i = 0; i < loops_per_jiffy * HZ; i++) {
+ for (i = 0; i < loops; i++) {
if (__raw_spin_trylock(&lock->raw_lock))
return;
__delay(1);
@@ -165,11 +166,12 @@ static void rwlock_bug(rwlock_t *lock, const char *msg)
#if 0 /* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
- int print_once = 1;
u64 i;
+ u64 loops = loops_per_jiffy * HZ;
+ int print_once = 1;
for (;;) {
- for (i = 0; i < loops_per_jiffy * HZ; i++) {
+ for (i = 0; i < loops; i++) {
if (__raw_read_trylock(&lock->raw_lock))
return;
__delay(1);
@@ -239,11 +241,12 @@ static inline void debug_write_unlock(rwlock_t *lock)
#if 0 /* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
- int print_once = 1;
u64 i;
+ u64 loops = loops_per_jiffy * HZ;
+ int print_once = 1;
for (;;) {
- for (i = 0; i < loops_per_jiffy * HZ; i++) {
+ for (i = 0; i < loops; i++) {
if (__raw_write_trylock(&lock->raw_lock))
return;
__delay(1);
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index 87847c2ae9e2..af575b61526b 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -12,13 +12,13 @@
*
* A finite state machine consists of n states (struct ts_fsm_token)
* representing the pattern as a finite automaton. The data is read
- * sequentially on a octet basis. Every state token specifies the number
+ * sequentially on an octet basis. Every state token specifies the number
* of recurrences and the type of value accepted which can be either a
* specific character or ctype based set of characters. The available
* type of recurrences include 1, (0|1), [0 n], and [1 n].
*
- * The algorithm differs between strict/non-strict mode specyfing
- * whether the pattern has to start at the first octect. Strict mode
+ * The algorithm differs between strict/non-strict mode specifying
+ * whether the pattern has to start at the first octet. Strict mode
* is enabled by default and can be disabled by inserting
* TS_FSM_HEAD_IGNORE as the first token in the chain.
*
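To make the token machinery described above concrete, a hedged sketch of a pattern definition, assuming the ts_fsm_token layout and recurrence constants from linux/textsearch_fsm.h (a wildcard token with TS_FSM_HEAD_IGNORE as the first entry makes the match non-strict):

#include <linux/textsearch.h>
#include <linux/textsearch_fsm.h>

/* "anywhere in the data: one or more digits" */
static struct ts_fsm_token tokens[] = {
	{ .type = TS_FSM_WILDCARD, .recur = TS_FSM_HEAD_IGNORE },
	{ .type = TS_FSM_DIGIT,    .recur = TS_FSM_MULTI },	/* [1..n] */
};

static struct ts_config *make_pattern(void)
{
	return textsearch_prepare("fsm", tokens, sizeof(tokens),
				  GFP_KERNEL, TS_AUTOLOAD);
}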
@@ -44,7 +44,7 @@ struct ts_fsm
#define _W 0x200 /* wildcard */
/* Map to _ctype flags and some magic numbers */
-static u16 token_map[TS_FSM_TYPE_MAX+1] = {
+static const u16 token_map[TS_FSM_TYPE_MAX+1] = {
[TS_FSM_SPECIFIC] = 0,
[TS_FSM_WILDCARD] = _W,
[TS_FSM_CNTRL] = _C,
@@ -61,7 +61,7 @@ static u16 token_map[TS_FSM_TYPE_MAX+1] = {
[TS_FSM_ASCII] = _A,
};
-static u16 token_lookup_tbl[256] = {
+static const u16 token_lookup_tbl[256] = {
_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 0- 3 */
_W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 4- 7 */
_W|_A|_C, _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C|_S, /* 8- 11 */