-rw-r--r--  Documentation/kernel-parameters.txt  37
-rw-r--r--  Documentation/vm/slub.txt  135
-rw-r--r--  arch/i386/kernel/smpboot.c  5
-rw-r--r--  arch/m68k/Kconfig  13
-rw-r--r--  arch/m68k/Makefile  1
-rw-r--r--  arch/m68k/kernel/Makefile  3
-rw-r--r--  arch/m68k/kernel/module.c  31
-rw-r--r--  arch/m68k/kernel/module.lds  7
-rw-r--r--  arch/m68k/kernel/setup.c  37
-rw-r--r--  arch/m68k/kernel/vmlinux-std.lds  5
-rw-r--r--  arch/m68k/kernel/vmlinux-sun3.lds  5
-rw-r--r--  arch/m68k/mm/init.c  119
-rw-r--r--  arch/m68k/mm/memory.c  73
-rw-r--r--  arch/m68k/mm/motorola.c  104
-rw-r--r--  arch/m68k/sun3/config.c  2
-rw-r--r--  drivers/ata/libata-core.c  1
-rw-r--r--  drivers/char/drm/drm_drawable.c  41
-rw-r--r--  drivers/char/drm/drm_pciids.h  7
-rw-r--r--  drivers/char/drm/i915_irq.c  2
-rw-r--r--  drivers/pci/quirks.c  18
-rw-r--r--  drivers/pcmcia/at91_cf.c  13
-rw-r--r--  drivers/video/neofb.c  30
-rw-r--r--  fs/afs/internal.h  2
-rw-r--r--  fs/nfs/direct.c  12
-rw-r--r--  fs/ntfs/inode.c  2
-rw-r--r--  fs/ramfs/file-nommu.c  2
-rw-r--r--  include/asm-m68k/mmzone.h  9
-rw-r--r--  include/asm-m68k/module.h  34
-rw-r--r--  include/asm-m68k/motorola_pgtable.h  10
-rw-r--r--  include/asm-m68k/page.h  77
-rw-r--r--  include/asm-m68k/pgalloc.h  3
-rw-r--r--  include/asm-m68k/pgtable.h  17
-rw-r--r--  include/asm-m68k/sun3_pgtable.h  4
-rw-r--r--  include/asm-m68k/virtconvert.h  49
-rw-r--r--  mm/page_alloc.c  2
-rw-r--r--  mm/slub.c  7
36 files changed, 601 insertions, 318 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index aae2282600ca..ce91560229f5 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1132,9 +1132,9 @@ and is between 256 and 4096 characters. It is defined in the file
when set.
Format: <int>
- noaliencache [MM, NUMA] Disables the allcoation of alien caches in
- the slab allocator. Saves per-node memory, but will
- impact performance on real NUMA hardware.
+ noaliencache [MM, NUMA, SLAB] Disables the allocation of alien
+ caches in the slab allocator. Saves per-node memory,
+ but will impact performance.
noalign [KNL,ARM]
@@ -1613,6 +1613,37 @@ and is between 256 and 4096 characters. It is defined in the file
slram= [HW,MTD]
+ slub_debug [MM, SLUB]
+ Enabling slub_debug allows one to determine the culprit
+ if slab objects become corrupted. Enabling slub_debug
+ creates guard zones around objects and poisons objects
+ when not in use. Also tracks the last alloc / free.
+ For more information see Documentation/vm/slub.txt.
+
+ slub_max_order= [MM, SLUB]
+ Determines the maximum allowed order for slabs. Setting
+ this too high may cause fragmentation.
+ For more information see Documentation/vm/slub.txt.
+
+ slub_min_objects= [MM, SLUB]
+ The minimum objects per slab. SLUB will increase the
+ slab order up to slub_max_order to generate a
+ sufficiently big slab to satisfy the number of objects.
+ The higher the number of objects the smaller the overhead
+ of tracking slabs.
+ For more information see Documentation/vm/slub.txt.
+
+ slub_min_order= [MM, SLUB]
+ Determines the minimum page order for slabs. Must be
+ lower than slub_max_order.
+ For more information see Documentation/vm/slub.txt.
+
+ slub_nomerge [MM, SLUB]
+ Disable merging of slabs of similar size. May be
+ necessary if there is some reason to distinguish
+ allocs to different slabs.
+ For more information see Documentation/vm/slub.txt.
+
smart2= [HW]
Format: <io1>[,<io2>[,...,<io8>]]
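
[Illustration, not part of the patch: all of the slub_* options above are
kernel command line parameters and can be combined at boot; the values below
are arbitrary examples, not recommendations.]

	slub_debug slub_min_order=0 slub_max_order=3 slub_min_objects=16
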
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index 727c8d81aeaf..1523320abd87 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -1,13 +1,9 @@
Short users guide for SLUB
--------------------------
-First of all slub should transparently replace SLAB. If you enable
-SLUB then everything should work the same (Note the word "should".
-There is likely not much value in that word at this point).
-
The basic philosophy of SLUB is very different from SLAB. SLAB
requires rebuilding the kernel to activate debug options for all
-SLABS. SLUB always includes full debugging but its off by default.
+slab caches. SLUB always includes full debugging but it is off by default.
SLUB can enable debugging only for selected slabs in order to avoid
an impact on overall system performance which may make a bug more
difficult to find.
@@ -76,13 +72,28 @@ of objects.
Careful with tracing: It may spew out lots of information and never stop if
used on the wrong slab.
-SLAB Merging
+Slab merging
------------
-If no debugging is specified then SLUB may merge similar slabs together
+If no debug options are specified then SLUB may merge similar slabs together
in order to reduce overhead and increase cache hotness of objects.
slabinfo -a displays which slabs were merged together.
+Slab validation
+---------------
+
+SLUB can validate all objects if the kernel was booted with slub_debug. In
+order to do so you must have the slabinfo tool. Then you can do
+
+slabinfo -v
+
+which will test all objects. Output will be generated to the syslog.
+
+This also works in a more limited way if the kernel was booted without slub_debug.
+In that case slabinfo -v simply tests all reachable objects. Usually
+these are in the cpu slabs and the partial slabs. Full slabs are not
+tracked by SLUB in a non-debug situation.
+
Getting more performance
------------------------
@@ -91,9 +102,9 @@ list_lock once in a while to deal with partial slabs. That overhead is
governed by the order of the allocation for each slab. The allocations
can be influenced by kernel parameters:
-slub_min_objects=x (default 8)
+slub_min_objects=x (default 4)
slub_min_order=x (default 0)
-slub_max_order=x (default 4)
+slub_max_order=x (default 1)
slub_min_objects allows to specify how many objects must at least fit
into one slab in order for the allocation order to be acceptable.
@@ -109,5 +120,107 @@ longer be checked. This is useful to avoid SLUB trying to generate
super large order pages to fit slub_min_objects of a slab cache with
large object sizes into one high order page.
-
-Christoph Lameter, <clameter@sgi.com>, April 10, 2007
+SLUB Debug output
+-----------------
+
+Here is a sample of slub debug output:
+
+*** SLUB kmalloc-8: Redzone Active@0xc90f6d20 slab 0xc528c530 offset=3360 flags=0x400000c3 inuse=61 freelist=0xc90f6d58
+ Bytes b4 0xc90f6d10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+ Object 0xc90f6d20: 31 30 31 39 2e 30 30 35 1019.005
+ Redzone 0xc90f6d28: 00 cc cc cc .
+FreePointer 0xc90f6d2c -> 0xc90f6d58
+Last alloc: get_modalias+0x61/0xf5 jiffies_ago=53 cpu=1 pid=554
+Filler 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
+ [<c010523d>] dump_trace+0x63/0x1eb
+ [<c01053df>] show_trace_log_lvl+0x1a/0x2f
+ [<c010601d>] show_trace+0x12/0x14
+ [<c0106035>] dump_stack+0x16/0x18
+ [<c017e0fa>] object_err+0x143/0x14b
+ [<c017e2cc>] check_object+0x66/0x234
+ [<c017eb43>] __slab_free+0x239/0x384
+ [<c017f446>] kfree+0xa6/0xc6
+ [<c02e2335>] get_modalias+0xb9/0xf5
+ [<c02e23b7>] dmi_dev_uevent+0x27/0x3c
+ [<c027866a>] dev_uevent+0x1ad/0x1da
+ [<c0205024>] kobject_uevent_env+0x20a/0x45b
+ [<c020527f>] kobject_uevent+0xa/0xf
+ [<c02779f1>] store_uevent+0x4f/0x58
+ [<c027758e>] dev_attr_store+0x29/0x2f
+ [<c01bec4f>] sysfs_write_file+0x16e/0x19c
+ [<c0183ba7>] vfs_write+0xd1/0x15a
+ [<c01841d7>] sys_write+0x3d/0x72
+ [<c0104112>] sysenter_past_esp+0x5f/0x99
+ [<b7f7b410>] 0xb7f7b410
+ =======================
+@@@ SLUB kmalloc-8: Restoring redzone (0xcc) from 0xc90f6d28-0xc90f6d2b
+
+
+
+If SLUB encounters a corrupted object then it will perform the following
+actions:
+
+1. Isolation and report of the issue
+
+This will be a message in the system log starting with
+
+*** SLUB <slab cache affected>: <What went wrong>@<object address>
+offset=<offset of object into slab> flags=<slabflags>
+inuse=<objects in use in this slab> freelist=<first free object in slab>
+
+2. Report on how the problem was dealt with in order to ensure the continued
+operation of the system.
+
+These are messages in the system log beginning with
+
+@@@ SLUB <slab cache affected>: <corrective action taken>
+
+
+In the above sample SLUB found that the Redzone of an active object has
+been overwritten. Here a string of 8 characters was written into a slab that
+has the length of 8 characters. However, an 8 character string needs a
+terminating 0. That zero has overwritten the first byte of the Redzone field.
+After reporting the details of the issue encountered, the @@@ SLUB message
+tells us that SLUB has restored the redzone to its proper value and then
+system operations continue.
+
+Various types of lines can follow the *** SLUB line:
+
+Bytes b4 <address> : <bytes>
+ Show a few bytes before the object where the problem was detected.
+ Can be useful if the corruption does not stop with the start of the
+ object.
+
+Object <address> : <bytes>
+ The bytes of the object. If the object is inactive then the bytes
+ typically contain poisoning values. Any non-poison value shows a
+ corruption by a write after free.
+
+Redzone <address> : <bytes>
+ The redzone following the object. The redzone is used to detect
+ writes after the object. All bytes should always have the same
+ value. If there is any deviation then it is due to a write after
+ the object boundary.
+
+Freepointer
+ The pointer to the next free object in the slab. May become
+ corrupted if overwriting continues after the red zone.
+
+Last alloc:
+Last free:
+ Shows the address from which the object was allocated/freed last.
+ We note the pid, the time and the CPU that did so. This is usually
+ the most useful information to figure out where things went wrong.
+ Here get_modalias() did a kmalloc(8) instead of a kmalloc(9).
+
+Filler <address> : <bytes>
+ Unused data to fill up the space in order to get the next object
+ properly aligned. In the debug case we make sure that there are
+ at least 4 bytes of filler. This allows for the detection of writes
+ before the object.
+
+Following the filler will be a stackdump. That stackdump describes the
+location where the error was detected. The cause of the corruption is more
+likely to be found by looking at the information about the last alloc / free.
+
+Christoph Lameter, <clameter@sgi.com>, May 23, 2007
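
[Illustration, not part of the patch: a minimal sketch of the bug class the
sample report above describes -- an 8-character string copied into a
kmalloc(8) buffer, so the terminating NUL lands in the first redzone byte.
The function name is hypothetical; this is not the actual get_modalias() code.]

	#include <linux/slab.h>
	#include <linux/string.h>

	static void redzone_overflow_example(void)
	{
		const char *version = "1019.005";	/* 8 characters + NUL = 9 bytes */
		char *buf = kmalloc(8, GFP_KERNEL);	/* one byte too small */

		if (!buf)
			return;
		strcpy(buf, version);	/* writes 9 bytes; the NUL overwrites the redzone */
		kfree(buf);		/* slub_debug reports the redzone corruption here */
	}
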
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 08f07a74a9d3..88baed1e7e83 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -943,10 +943,9 @@ exit:
static void smp_tune_scheduling(void)
{
- unsigned long cachesize; /* kB */
-
if (cpu_khz) {
- cachesize = boot_cpu_data.x86_cache_size;
+ /* cache size in kB */
+ long cachesize = boot_cpu_data.x86_cache_size;
if (cachesize > 0)
max_cache_size = cachesize * 1024;
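
[Illustration, not part of the patch: the switch from "unsigned long" to
"long" matters because boot_cpu_data.x86_cache_size can be -1 when the cache
size is unknown. Stored in an unsigned variable, -1 wraps to a huge positive
value, so the "cachesize > 0" guard never filters it out. A small sketch, with
example_max_cache standing in for max_cache_size:]

	static void cache_size_sign_example(void)
	{
		int reported = -1;			/* x86_cache_size when unknown */
		unsigned long old_way = reported;	/* wraps to a huge positive value */
		long new_way = reported;		/* stays -1 */
		unsigned long example_max_cache = 0;

		if (old_way > 0)			/* true: bogus value propagates */
			example_max_cache = old_way * 1024;
		if (new_way > 0)			/* false for -1: correctly skipped */
			example_max_cache = new_way * 1024;
		(void)example_max_cache;
	}
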
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index b8536c7c0877..85cdd23b0447 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -355,8 +355,9 @@ config RMW_INSNS
adventurous.
config SINGLE_MEMORY_CHUNK
- bool "Use one physical chunk of memory only"
- depends on ADVANCED && !SUN3
+ bool "Use one physical chunk of memory only" if ADVANCED && !SUN3
+ default y if SUN3
+ select NEED_MULTIPLE_NODES
help
Ignore all but the first contiguous chunk of physical memory for VM
purposes. This will save a few bytes kernel size and may speed up
@@ -377,6 +378,14 @@ config 060_WRITETHROUGH
is hardwired on. The 53c710 SCSI driver is known to suffer from
this problem.
+config ARCH_DISCONTIGMEM_ENABLE
+ def_bool !SINGLE_MEMORY_CHUNK
+
+config NODES_SHIFT
+ int
+ default "3"
+ depends on !SINGLE_MEMORY_CHUNK
+
source "mm/Kconfig"
endmenu
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index c20831a7e1a9..aa383a5ea7ac 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -19,6 +19,7 @@ COMPILE_ARCH = $(shell uname -m)
# override top level makefile
AS += -m68020
LDFLAGS := -m m68kelf
+LDFLAGS_MODULE += -T $(srctree)/arch/m68k/kernel/module.lds
ifneq ($(COMPILE_ARCH),$(ARCH))
# prefix for cross-compiling binaries
CROSS_COMPILE = m68k-linux-gnu-
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index 0b68ab8d63d1..a806208c7fb5 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -9,13 +9,12 @@ else
endif
extra-y += vmlinux.lds
-obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o \
+obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \
sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o devres.o
devres-y = ../../../kernel/irq/devres.o
obj-$(CONFIG_PCI) += bios32.o
-obj-$(CONFIG_MODULES) += module.o
obj-y$(CONFIG_MMU_SUN3) += dma.o # no, it's not a typo
EXTRA_AFLAGS := -traditional
diff --git a/arch/m68k/kernel/module.c b/arch/m68k/kernel/module.c
index 3b1a2ff61ddc..774862bc6977 100644
--- a/arch/m68k/kernel/module.c
+++ b/arch/m68k/kernel/module.c
@@ -1,3 +1,9 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
@@ -11,6 +17,8 @@
#define DEBUGP(fmt...)
#endif
+#ifdef CONFIG_MODULES
+
void *module_alloc(unsigned long size)
{
if (size == 0)
@@ -118,11 +126,32 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
- struct module *me)
+ struct module *mod)
{
+ module_fixup(mod, mod->arch.fixup_start, mod->arch.fixup_end);
+
return 0;
}
void module_arch_cleanup(struct module *mod)
{
}
+
+#endif /* CONFIG_MODULES */
+
+void module_fixup(struct module *mod, struct m68k_fixup_info *start,
+ struct m68k_fixup_info *end)
+{
+ struct m68k_fixup_info *fixup;
+
+ for (fixup = start; fixup < end; fixup++) {
+ switch (fixup->type) {
+ case m68k_fixup_memoffset:
+ *(u32 *)fixup->addr = m68k_memoffset;
+ break;
+ case m68k_fixup_vnode_shift:
+ *(u16 *)fixup->addr += m68k_virt_to_node_shift;
+ break;
+ }
+ }
+}
diff --git a/arch/m68k/kernel/module.lds b/arch/m68k/kernel/module.lds
new file mode 100644
index 000000000000..fda94fa38243
--- /dev/null
+++ b/arch/m68k/kernel/module.lds
@@ -0,0 +1,7 @@
+SECTIONS {
+ .m68k_fixup : {
+ __start_fixup = .;
+ *(.m68k_fixup)
+ __stop_fixup = .;
+ }
+}
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
index 610319356691..215c7bd43924 100644
--- a/arch/m68k/kernel/setup.c
+++ b/arch/m68k/kernel/setup.c
@@ -60,14 +60,12 @@ extern unsigned long availmem;
int m68k_num_memory;
int m68k_realnum_memory;
EXPORT_SYMBOL(m68k_realnum_memory);
-#ifdef CONFIG_SINGLE_MEMORY_CHUNK
unsigned long m68k_memoffset;
EXPORT_SYMBOL(m68k_memoffset);
-#endif
struct mem_info m68k_memory[NUM_MEMINFO];
EXPORT_SYMBOL(m68k_memory);
-static struct mem_info m68k_ramdisk;
+struct mem_info m68k_ramdisk;
static char m68k_command_line[CL_SIZE];
@@ -208,9 +206,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
void __init setup_arch(char **cmdline_p)
{
extern int _etext, _edata, _end;
-#ifndef CONFIG_SUN3
- unsigned long endmem, startmem;
-#endif
int i;
/* The bootinfo is located right after the kernel bss */
@@ -320,30 +315,16 @@ void __init setup_arch(char **cmdline_p)
panic("No configuration setup");
}
-#ifndef CONFIG_SUN3
- startmem= m68k_memory[0].addr;
- endmem = startmem + m68k_memory[0].size;
- high_memory = (void *)PAGE_OFFSET;
- for (i = 0; i < m68k_num_memory; i++) {
- m68k_memory[i].size &= MASK_256K;
- if (m68k_memory[i].addr < startmem)
- startmem = m68k_memory[i].addr;
- if (m68k_memory[i].addr+m68k_memory[i].size > endmem)
- endmem = m68k_memory[i].addr+m68k_memory[i].size;
- high_memory += m68k_memory[i].size;
- }
-
- availmem += init_bootmem_node(NODE_DATA(0), availmem >> PAGE_SHIFT,
- startmem >> PAGE_SHIFT, endmem >> PAGE_SHIFT);
-
- for (i = 0; i < m68k_num_memory; i++)
- free_bootmem(m68k_memory[i].addr, m68k_memory[i].size);
-
- reserve_bootmem(m68k_memory[0].addr, availmem - m68k_memory[0].addr);
+ paging_init();
+#ifndef CONFIG_SUN3
+ for (i = 1; i < m68k_num_memory; i++)
+ free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr,
+ m68k_memory[i].size);
#ifdef CONFIG_BLK_DEV_INITRD
if (m68k_ramdisk.size) {
- reserve_bootmem(m68k_ramdisk.addr, m68k_ramdisk.size);
+ reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)),
+ m68k_ramdisk.addr, m68k_ramdisk.size);
initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
initrd_end = initrd_start + m68k_ramdisk.size;
printk("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
@@ -362,8 +343,6 @@ void __init setup_arch(char **cmdline_p)
#endif /* !CONFIG_SUN3 */
- paging_init();
-
/* set ISA defs early as possible */
#if defined(CONFIG_ISA) && defined(MULTI_ISA)
#if defined(CONFIG_Q40)
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index 78f139226a1b..40f02b128f22 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -60,6 +60,11 @@ SECTIONS
__con_initcall_start = .;
.con_initcall.init : { *(.con_initcall.init) }
__con_initcall_end = .;
+ .m68k_fixup : {
+ __start_fixup = .;
+ *(.m68k_fixup)
+ __stop_fixup = .;
+ }
SECURITY_INIT
#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(8192);
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index c8999b2db23b..f06425b6d206 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -54,6 +54,11 @@ __init_begin = .;
__con_initcall_start = .;
.con_initcall.init : { *(.con_initcall.init) }
__con_initcall_end = .;
+ .m68k_fixup : {
+ __start_fixup = .;
+ *(.m68k_fixup)
+ __stop_fixup = .;
+ }
SECURITY_INIT
#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(8192);
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index ab90213e5c54..f1de19e1dde6 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -7,6 +7,7 @@
* to motorola.c and sun3mmu.c
*/
+#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -31,6 +32,37 @@
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+static bootmem_data_t __initdata bootmem_data[MAX_NUMNODES];
+
+pg_data_t pg_data_map[MAX_NUMNODES];
+EXPORT_SYMBOL(pg_data_map);
+
+int m68k_virt_to_node_shift;
+
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+pg_data_t *pg_data_table[65];
+EXPORT_SYMBOL(pg_data_table);
+#endif
+
+void m68k_setup_node(int node)
+{
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+ struct mem_info *info = m68k_memory + node;
+ int i, end;
+
+ i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
+ end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
+ for (; i <= end; i++) {
+ if (pg_data_table[i])
+ printk("overlap at %u for chunk %u\n", i, node);
+ pg_data_table[i] = pg_data_map + node;
+ }
+#endif
+ pg_data_map[node].bdata = bootmem_data + node;
+ node_set_online(node);
+}
+
+
/*
* ZERO_PAGE is a special page that is used for zero-initialized
* data and COW.
@@ -40,52 +72,51 @@ void *empty_zero_page;
void show_mem(void)
{
- unsigned long i;
- int free = 0, total = 0, reserved = 0, shared = 0;
- int cached = 0;
-
- printk("\nMem-info:\n");
- show_free_areas();
- printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- i = max_mapnr;
- while (i-- > 0) {
- total++;
- if (PageReserved(mem_map+i))
- reserved++;
- else if (PageSwapCache(mem_map+i))
- cached++;
- else if (!page_count(mem_map+i))
- free++;
- else
- shared += page_count(mem_map+i) - 1;
- }
- printk("%d pages of RAM\n",total);
- printk("%d free pages\n",free);
- printk("%d reserved pages\n",reserved);
- printk("%d pages shared\n",shared);
- printk("%d pages swap cached\n",cached);
+ pg_data_t *pgdat;
+ int free = 0, total = 0, reserved = 0, shared = 0;
+ int cached = 0;
+ int i;
+
+ printk("\nMem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+ for_each_online_pgdat(pgdat) {
+ for (i = 0; i < pgdat->node_spanned_pages; i++) {
+ struct page *page = pgdat->node_mem_map + i;
+ total++;
+ if (PageReserved(page))
+ reserved++;
+ else if (PageSwapCache(page))
+ cached++;
+ else if (!page_count(page))
+ free++;
+ else
+ shared += page_count(page) - 1;
+ }
+ }
+ printk("%d pages of RAM\n",total);
+ printk("%d free pages\n",free);
+ printk("%d reserved pages\n",reserved);
+ printk("%d pages shared\n",shared);
+ printk("%d pages swap cached\n",cached);
}
extern void init_pointer_table(unsigned long ptable);
/* References to section boundaries */
-extern char _text, _etext, _edata, __bss_start, _end;
-extern char __init_begin, __init_end;
+extern char _text[], _etext[];
+extern char __init_begin[], __init_end[];
extern pmd_t *zero_pgtable;
void __init mem_init(void)
{
+ pg_data_t *pgdat;
int codepages = 0;
int datapages = 0;
int initpages = 0;
- unsigned long tmp;
-#ifndef CONFIG_SUN3
int i;
-#endif
-
- max_mapnr = num_physpages = (((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT);
#ifdef CONFIG_ATARI
if (MACH_IS_ATARI)
@@ -93,19 +124,25 @@ void __init mem_init(void)
#endif
/* this will put all memory onto the freelists */
- totalram_pages = free_all_bootmem();
-
- for (tmp = PAGE_OFFSET ; tmp < (unsigned long)high_memory; tmp += PAGE_SIZE) {
- if (PageReserved(virt_to_page(tmp))) {
- if (tmp >= (unsigned long)&_text
- && tmp < (unsigned long)&_etext)
+ totalram_pages = num_physpages = 0;
+ for_each_online_pgdat(pgdat) {
+ num_physpages += pgdat->node_present_pages;
+
+ totalram_pages += free_all_bootmem_node(pgdat);
+ for (i = 0; i < pgdat->node_spanned_pages; i++) {
+ struct page *page = pgdat->node_mem_map + i;
+ char *addr = page_to_virt(page);
+
+ if (!PageReserved(page))
+ continue;
+ if (addr >= _text &&
+ addr < _etext)
codepages++;
- else if (tmp >= (unsigned long) &__init_begin
- && tmp < (unsigned long) &__init_end)
+ else if (addr >= __init_begin &&
+ addr < __init_end)
initpages++;
else
datapages++;
- continue;
}
}
@@ -124,7 +161,7 @@ void __init mem_init(void)
printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
- max_mapnr << (PAGE_SHIFT-10),
+ totalram_pages << (PAGE_SHIFT-10),
codepages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10));
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 13c0b4ad01eb..b7473525b431 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -127,67 +127,6 @@ int free_pointer_table (pmd_t *ptable)
return 0;
}
-#ifdef DEBUG_INVALID_PTOV
-int mm_inv_cnt = 5;
-#endif
-
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
-/*
- * The following two routines map from a physical address to a kernel
- * virtual address and vice versa.
- */
-unsigned long mm_vtop(unsigned long vaddr)
-{
- int i=0;
- unsigned long voff = (unsigned long)vaddr - PAGE_OFFSET;
-
- do {
- if (voff < m68k_memory[i].size) {
-#ifdef DEBUGPV
- printk ("VTOP(%p)=%lx\n", vaddr,
- m68k_memory[i].addr + voff);
-#endif
- return m68k_memory[i].addr + voff;
- }
- voff -= m68k_memory[i].size;
- } while (++i < m68k_num_memory);
-
- /* As a special case allow `__pa(high_memory)'. */
- if (voff == 0)
- return m68k_memory[i-1].addr + m68k_memory[i-1].size;
-
- return -1;
-}
-EXPORT_SYMBOL(mm_vtop);
-
-unsigned long mm_ptov (unsigned long paddr)
-{
- int i = 0;
- unsigned long poff, voff = PAGE_OFFSET;
-
- do {
- poff = paddr - m68k_memory[i].addr;
- if (poff < m68k_memory[i].size) {
-#ifdef DEBUGPV
- printk ("PTOV(%lx)=%lx\n", paddr, poff + voff);
-#endif
- return poff + voff;
- }
- voff += m68k_memory[i].size;
- } while (++i < m68k_num_memory);
-
-#ifdef DEBUG_INVALID_PTOV
- if (mm_inv_cnt > 0) {
- mm_inv_cnt--;
- printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n",
- paddr, __builtin_return_address(0));
- }
-#endif
- return -1;
-}
-EXPORT_SYMBOL(mm_ptov);
-#endif
-
/* invalidate page in both caches */
static inline void clear040(unsigned long paddr)
{
@@ -354,15 +293,3 @@ void cache_push (unsigned long paddr, int len)
}
EXPORT_SYMBOL(cache_push);
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
-int mm_end_of_chunk (unsigned long addr, int len)
-{
- int i;
-
- for (i = 0; i < m68k_num_memory; i++)
- if (m68k_memory[i].addr + m68k_memory[i].size == addr + len)
- return 1;
- return 0;
-}
-EXPORT_SYMBOL(mm_end_of_chunk);
-#endif
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index afcccdc6ad45..7d571a2b44dd 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -43,6 +43,11 @@ unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif
+/* size of memory already mapped in head.S */
+#define INIT_MAPPED_SIZE (4UL<<20)
+
+extern unsigned long availmem;
+
static pte_t * __init kernel_page_table(void)
{
pte_t *ptablep;
@@ -98,19 +103,20 @@ static pmd_t * __init kernel_ptr_table(void)
return last_pgtable;
}
-static unsigned long __init
-map_chunk (unsigned long addr, long size)
+static void __init map_node(int node)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
- static unsigned long virtaddr = PAGE_OFFSET;
- unsigned long physaddr;
+ unsigned long physaddr, virtaddr, size;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
- physaddr = (addr | m68k_supervisor_cachemode |
- _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+ size = m68k_memory[node].size;
+ physaddr = m68k_memory[node].addr;
+ virtaddr = (unsigned long)phys_to_virt(physaddr);
+ physaddr |= m68k_supervisor_cachemode |
+ _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
if (CPU_IS_040_OR_060)
physaddr |= _PAGE_GLOBAL040;
@@ -190,8 +196,6 @@ map_chunk (unsigned long addr, long size)
#ifdef DEBUG
printk("\n");
#endif
-
- return virtaddr;
}
/*
@@ -200,15 +204,16 @@ map_chunk (unsigned long addr, long size)
*/
void __init paging_init(void)
{
- int chunk;
- unsigned long mem_avail = 0;
unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+ unsigned long min_addr, max_addr;
+ unsigned long addr, size, end;
+ int i;
#ifdef DEBUG
{
extern unsigned long availmem;
- printk ("start of paging_init (%p, %lx, %lx, %lx)\n",
- kernel_pg_dir, availmem, start_mem, end_mem);
+ printk ("start of paging_init (%p, %lx)\n",
+ kernel_pg_dir, availmem);
}
#endif
@@ -222,24 +227,62 @@ void __init paging_init(void)
pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
}
+ min_addr = m68k_memory[0].addr;
+ max_addr = min_addr + m68k_memory[0].size;
+ for (i = 1; i < m68k_num_memory;) {
+ if (m68k_memory[i].addr < min_addr) {
+ printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
+ m68k_memory[i].addr, m68k_memory[i].size);
+ printk("Fix your bootloader or use a memfile to make use of this area!\n");
+ m68k_num_memory--;
+ memmove(m68k_memory + i, m68k_memory + i + 1,
+ (m68k_num_memory - i) * sizeof(struct mem_info));
+ continue;
+ }
+ addr = m68k_memory[i].addr + m68k_memory[i].size;
+ if (addr > max_addr)
+ max_addr = addr;
+ i++;
+ }
+ m68k_memoffset = min_addr - PAGE_OFFSET;
+ m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;
+
+ module_fixup(NULL, __start_fixup, __stop_fixup);
+ flush_icache();
+
+ high_memory = phys_to_virt(max_addr);
+
+ min_low_pfn = availmem >> PAGE_SHIFT;
+ max_low_pfn = max_addr >> PAGE_SHIFT;
+
+ for (i = 0; i < m68k_num_memory; i++) {
+ addr = m68k_memory[i].addr;
+ end = addr + m68k_memory[i].size;
+ m68k_setup_node(i);
+ availmem = PAGE_ALIGN(availmem);
+ availmem += init_bootmem_node(NODE_DATA(i),
+ availmem >> PAGE_SHIFT,
+ addr >> PAGE_SHIFT,
+ end >> PAGE_SHIFT);
+ }
+
/*
* Map the physical memory available into the kernel virtual
- * address space. It may allocate some memory for page
- * tables and thus modify availmem.
+ * address space. First initialize the bootmem allocator with
+ * the memory we already mapped, so map_node() has something
+ * to allocate.
*/
+ addr = m68k_memory[0].addr;
+ size = m68k_memory[0].size;
+ free_bootmem_node(NODE_DATA(0), availmem, min(INIT_MAPPED_SIZE, size) - (availmem - addr));
+ map_node(0);
+ if (size > INIT_MAPPED_SIZE)
+ free_bootmem_node(NODE_DATA(0), addr + INIT_MAPPED_SIZE, size - INIT_MAPPED_SIZE);
- for (chunk = 0; chunk < m68k_num_memory; chunk++) {
- mem_avail = map_chunk (m68k_memory[chunk].addr,
- m68k_memory[chunk].size);
-
- }
+ for (i = 1; i < m68k_num_memory; i++)
+ map_node(i);
flush_tlb_all();
-#ifdef DEBUG
- printk ("memory available is %ldKB\n", mem_avail >> 10);
- printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
- start_mem, end_mem);
-#endif
/*
* initialize the bad page table and bad page to point
@@ -256,14 +299,11 @@ void __init paging_init(void)
#ifdef DEBUG
printk ("before free_area_init\n");
#endif
- zones_size[ZONE_DMA] = (mach_max_dma_address < (unsigned long)high_memory ?
- (mach_max_dma_address+1) : (unsigned long)high_memory);
- zones_size[ZONE_NORMAL] = (unsigned long)high_memory - zones_size[0];
-
- zones_size[ZONE_DMA] = (zones_size[ZONE_DMA] - PAGE_OFFSET) >> PAGE_SHIFT;
- zones_size[ZONE_NORMAL] >>= PAGE_SHIFT;
-
- free_area_init(zones_size);
+ for (i = 0; i < m68k_num_memory; i++) {
+ zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
+ free_area_init_node(i, pg_data_map + i, zones_size,
+ m68k_memory[i].addr >> PAGE_SHIFT, NULL);
+ }
}
extern char __init_begin, __init_end;
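
[Illustration, not part of the patch: a worked example of the node-shift
arithmetic in paging_init() above. Assume for simplicity that PAGE_OFFSET is 0
and the memory chunks span 256 MB, i.e. max_addr - min_addr = 0x10000000:]

	m68k_virt_to_node_shift = fls(0x10000000 - 1) - 6 = 28 - 6 = 22
	slot size  = 1 << 22 = 4 MB per pg_data_table[] entry
	slots used = 256 MB / 4 MB = 64   (at most 2^6 by construction)

[So __virt_to_node() touches at most 64 table slots, plus one spare if the
virtual range does not start on a slot boundary -- which fits the 65-entry
pg_data_table[] declared in arch/m68k/mm/init.c earlier in this patch.]
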
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 4851b8437a87..c0fbd278fbb1 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -21,6 +21,7 @@
#include <asm/contregs.h>
#include <asm/movs.h>
#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
#include <asm/sun3-head.h>
#include <asm/sun3mmu.h>
#include <asm/rtc.h>
@@ -127,6 +128,7 @@ void __init sun3_bootmem_alloc(unsigned long memory_start, unsigned long memory_
high_memory = (void *)memory_end;
availmem = memory_start;
+ m68k_setup_node(0);
availmem += init_bootmem_node(NODE_DATA(0), start_page, 0, num_pages);
availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 3ca9c610c110..af625147df62 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3783,6 +3783,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
/* NCQ is broken */
{ "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
+ { "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ },
/* NCQ hard hangs device under heavier load, needs hard power cycle */
{ "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
/* Blacklist entries taken from Silicon Image 3124/3132
diff --git a/drivers/char/drm/drm_drawable.c b/drivers/char/drm/drm_drawable.c
index de37d5f74563..b33313be2547 100644
--- a/drivers/char/drm/drm_drawable.c
+++ b/drivers/char/drm/drm_drawable.c
@@ -172,38 +172,49 @@ int drm_rmdraw(DRM_IOCTL_ARGS)
bitfield_length = idx + 1;
- if (idx != id / (8 * sizeof(*bitfield)))
- bitfield = drm_alloc(bitfield_length *
- sizeof(*bitfield), DRM_MEM_BUFS);
+ bitfield = NULL;
- if (!bitfield && bitfield_length) {
- bitfield = dev->drw_bitfield;
- bitfield_length = dev->drw_bitfield_length;
+ if (bitfield_length) {
+ if (bitfield_length != dev->drw_bitfield_length)
+ bitfield = drm_alloc(bitfield_length *
+ sizeof(*bitfield),
+ DRM_MEM_BUFS);
+
+ if (!bitfield) {
+ bitfield = dev->drw_bitfield;
+ bitfield_length = dev->drw_bitfield_length;
+ }
}
}
if (bitfield != dev->drw_bitfield) {
info_length = 8 * sizeof(*bitfield) * bitfield_length;
- info = drm_alloc(info_length * sizeof(*info), DRM_MEM_BUFS);
+ if (info_length) {
+ info = drm_alloc(info_length * sizeof(*info),
+ DRM_MEM_BUFS);
- if (!info && info_length) {
- info = dev->drw_info;
- info_length = dev->drw_info_length;
- }
+ if (!info) {
+ info = dev->drw_info;
+ info_length = dev->drw_info_length;
+ }
+ } else
+ info = NULL;
spin_lock_irqsave(&dev->drw_lock, irqflags);
- memcpy(bitfield, dev->drw_bitfield, bitfield_length *
- sizeof(*bitfield));
+ if (bitfield)
+ memcpy(bitfield, dev->drw_bitfield, bitfield_length *
+ sizeof(*bitfield));
drm_free(dev->drw_bitfield, sizeof(*bitfield) *
dev->drw_bitfield_length, DRM_MEM_BUFS);
dev->drw_bitfield = bitfield;
dev->drw_bitfield_length = bitfield_length;
if (info != dev->drw_info) {
- memcpy(info, dev->drw_info, info_length *
- sizeof(*info));
+ if (info)
+ memcpy(info, dev->drw_info, info_length *
+ sizeof(*info));
drm_free(dev->drw_info, sizeof(*info) *
dev->drw_info_length, DRM_MEM_BUFS);
dev->drw_info = info;
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index 31cdde83713b..177ccc07f968 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -102,13 +102,20 @@
{0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
+ {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
{0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
+ {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+ {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
{0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index 78c1ae28f17c..b92062a239f1 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -582,7 +582,7 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&dev_priv->swaps_lock);
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
dev_priv->swaps_pending = 0;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6ccc2e95930a..1cff65fb9c43 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1625,18 +1625,20 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_pcie_aer_ext_cap);
#ifdef CONFIG_PCI_MSI
-/* The Serverworks PCI-X chipset does not support MSI. We cannot easily rely
- * on setting PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
- * some other busses controlled by the chipset even if Linux is not aware of it.
- * Instead of setting the flag on all busses in the machine, simply disable MSI
- * globally.
+/* Some chipsets do not support MSI. We cannot easily rely on setting
+ * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
+ * some other busses controlled by the chipset even if Linux is not
+ * aware of it. Instead of setting the flag on all busses in the
+ * machine, simply disable MSI globally.
*/
-static void __init quirk_svw_msi(struct pci_dev *dev)
+static void __init quirk_disable_all_msi(struct pci_dev *dev)
{
pci_no_msi();
printk(KERN_WARNING "PCI: MSI quirk detected. MSI deactivated.\n");
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_svw_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
/* Disable MSI on chipsets that are known to not support it */
static void __devinit quirk_disable_msi(struct pci_dev *dev)
@@ -1649,8 +1651,6 @@ static void __devinit quirk_disable_msi(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_msi);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_msi);
/* Go through the list of Hypertransport capabilities and
* return 1 if a HT MSI capability is found and enabled */
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 948efc775a78..eb6abd3f9221 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -336,16 +336,21 @@ static int at91_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
enable_irq_wake(board->det_pin);
if (board->irq_pin)
enable_irq_wake(board->irq_pin);
- } else {
- disable_irq_wake(board->det_pin);
- if (board->irq_pin)
- disable_irq_wake(board->irq_pin);
}
return 0;
}
static int at91_cf_resume(struct platform_device *pdev)
{
+ struct at91_cf_socket *cf = platform_get_drvdata(pdev);
+ struct at91_cf_data *board = cf->board;
+
+ if (device_may_wakeup(&pdev->dev)) {
+ disable_irq_wake(board->det_pin);
+ if (board->irq_pin)
+ disable_irq_wake(board->irq_pin);
+ }
+
pcmcia_socket_dev_resume(&pdev->dev);
return 0;
}
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index bd30aba242d0..731d7a5c5aa2 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -1286,34 +1286,36 @@ static int neofb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
if (regno >= fb->cmap.len || regno > 255)
return -EINVAL;
- switch (fb->var.bits_per_pixel) {
- case 8:
+ if (fb->var.bits_per_pixel <= 8) {
outb(regno, 0x3c8);
outb(red >> 10, 0x3c9);
outb(green >> 10, 0x3c9);
outb(blue >> 10, 0x3c9);
- break;
- case 16:
- ((u32 *) fb->pseudo_palette)[regno] =
+ } else if (regno < 16) {
+ switch (fb->var.bits_per_pixel) {
+ case 16:
+ ((u32 *) fb->pseudo_palette)[regno] =
((red & 0xf800)) | ((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11);
- break;
- case 24:
- ((u32 *) fb->pseudo_palette)[regno] =
+ break;
+ case 24:
+ ((u32 *) fb->pseudo_palette)[regno] =
((red & 0xff00) << 8) | ((green & 0xff00)) |
((blue & 0xff00) >> 8);
- break;
+ break;
#ifdef NO_32BIT_SUPPORT_YET
- case 32:
- ((u32 *) fb->pseudo_palette)[regno] =
+ case 32:
+ ((u32 *) fb->pseudo_palette)[regno] =
((transp & 0xff00) << 16) | ((red & 0xff00) << 8) |
((green & 0xff00)) | ((blue & 0xff00) >> 8);
- break;
+ break;
#endif
- default:
- return 1;
+ default:
+ return 1;
+ }
}
+
return 0;
}
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 2dac3ad2c44b..2c55dd94a1de 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -17,6 +17,8 @@
#include <linux/rxrpc.h>
#include <linux/key.h>
#include <linux/workqueue.h>
+#include <linux/sched.h>
+
#include "afs.h"
#include "afs_vl.h"
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 0c542ec92d5b..00eee87510fe 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -168,7 +168,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
return dreq;
}
-static void nfs_direct_req_release(struct kref *kref)
+static void nfs_direct_req_free(struct kref *kref)
{
struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
@@ -177,6 +177,11 @@ static void nfs_direct_req_release(struct kref *kref)
kmem_cache_free(nfs_direct_cachep, dreq);
}
+static void nfs_direct_req_release(struct nfs_direct_req *dreq)
+{
+ kref_put(&dreq->kref, nfs_direct_req_free);
+}
+
/*
* Collects and returns the final error value/byte-count.
*/
@@ -196,7 +201,6 @@ static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
result = dreq->count;
out:
- kref_put(&dreq->kref, nfs_direct_req_release);
return (ssize_t) result;
}
@@ -214,7 +218,7 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
}
complete_all(&dreq->completion);
- kref_put(&dreq->kref, nfs_direct_req_release);
+ nfs_direct_req_release(dreq);
}
/*
@@ -369,6 +373,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
if (!result)
result = nfs_direct_wait(dreq);
rpc_clnt_sigunmask(clnt, &oldset);
+ nfs_direct_req_release(dreq);
return result;
}
@@ -716,6 +721,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
if (!result)
result = nfs_direct_wait(dreq);
rpc_clnt_sigunmask(clnt, &oldset);
+ nfs_direct_req_release(dreq);
return result;
}
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 074791ce4ab2..b532a730cec2 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -140,7 +140,7 @@ static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na)
if (!ni->name)
return -ENOMEM;
memcpy(ni->name, na->name, i);
- ni->name[i] = 0;
+ ni->name[na->name_len] = 0;
}
return 0;
}
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 3b481d557edb..9345a46ffb32 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -179,7 +179,7 @@ static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
return ret;
}
- ret = vmtruncate(inode, size);
+ ret = vmtruncate(inode, newsize);
return ret;
}
diff --git a/include/asm-m68k/mmzone.h b/include/asm-m68k/mmzone.h
new file mode 100644
index 000000000000..e1f1ec7b7006
--- /dev/null
+++ b/include/asm-m68k/mmzone.h
@@ -0,0 +1,9 @@
+#ifndef _ASM_M68K_MMZONE_H_
+#define _ASM_M68K_MMZONE_H_
+
+extern pg_data_t pg_data_map[];
+
+#define NODE_DATA(nid) (&pg_data_map[nid])
+#define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map)
+
+#endif /* _ASM_M68K_MMZONE_H_ */
diff --git a/include/asm-m68k/module.h b/include/asm-m68k/module.h
index c6d75af2d8d3..382d20a6fc18 100644
--- a/include/asm-m68k/module.h
+++ b/include/asm-m68k/module.h
@@ -1,7 +1,39 @@
#ifndef _ASM_M68K_MODULE_H
#define _ASM_M68K_MODULE_H
-struct mod_arch_specific { };
+
+struct mod_arch_specific {
+ struct m68k_fixup_info *fixup_start, *fixup_end;
+};
+
+#define MODULE_ARCH_INIT { \
+ .fixup_start = __start_fixup, \
+ .fixup_end = __stop_fixup, \
+}
+
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
+
+
+enum m68k_fixup_type {
+ m68k_fixup_memoffset,
+ m68k_fixup_vnode_shift,
+};
+
+struct m68k_fixup_info {
+ enum m68k_fixup_type type;
+ void *addr;
+};
+
+#define m68k_fixup(type, addr) \
+ " .section \".m68k_fixup\",\"aw\"\n" \
+ " .long " #type "," #addr "\n" \
+ " .previous\n"
+
+extern struct m68k_fixup_info __start_fixup[], __stop_fixup[];
+
+struct module;
+extern void module_fixup(struct module *mod, struct m68k_fixup_info *start,
+ struct m68k_fixup_info *end);
+
#endif /* _ASM_M68K_MODULE_H */
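
[Illustration, not part of the patch: how a fixup site is recorded. An
instruction is emitted with a 0 immediate, and m68k_fixup() drops a
(type, address) pair into the .m68k_fixup section pointing at that immediate;
module_fixup() later rewrites it with m68k_memoffset or the node shift. The
in-tree users are ___pa()/__va() and __virt_to_node_shift() in
include/asm-m68k/page.h further down in this patch; a condensed sketch of the
same pattern, assuming <asm/module.h> is in scope (the function name is
hypothetical):]

	static inline unsigned long example_virt_to_phys(void *vaddr)
	{
		unsigned long paddr;

		asm("1:	addl #0,%0\n"		/* immediate patched at load time */
		    m68k_fixup(%c2, 1b+2)	/* record the immediate's address */
		    : "=r" (paddr)
		    : "0" (vaddr), "i" (m68k_fixup_memoffset));
		return paddr;			/* vaddr + m68k_memoffset */
	}
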
diff --git a/include/asm-m68k/motorola_pgtable.h b/include/asm-m68k/motorola_pgtable.h
index 61e4406ed96a..b5b78c01eb6c 100644
--- a/include/asm-m68k/motorola_pgtable.h
+++ b/include/asm-m68k/motorola_pgtable.h
@@ -130,7 +130,7 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
#define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,ptep) ({ pte_val(*(ptep)) = 0; })
-#define pte_page(pte) (mem_map + ((unsigned long)(__va(pte_val(pte)) - PAGE_OFFSET) >> PAGE_SHIFT))
+#define pte_page(pte) virt_to_page(__va(pte_val(pte)))
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
@@ -143,7 +143,7 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
while (--__i >= 0) \
*__ptr++ = 0; \
})
-#define pmd_page(pmd) (mem_map + ((unsigned long)(__va(pmd_val(pmd)) - PAGE_OFFSET) >> PAGE_SHIFT))
+#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
#define pgd_none(pgd) (!pgd_val(pgd))
@@ -223,10 +223,10 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
return (pte_t *)__pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}
-#define pte_offset_map(pmdp,address) ((pte_t *)kmap(pmd_page(*pmdp)) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+#define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address)
-#define pte_unmap(pte) kunmap(pte)
-#define pte_unmap_nested(pte) kunmap(pte)
+#define pte_unmap(pte) ((void)0)
+#define pte_unmap_nested(pte) ((void)0)
/*
* Allocate and free page tables. The xxx_kernel() versions are
diff --git a/include/asm-m68k/page.h b/include/asm-m68k/page.h
index fcc165ddd09e..9e6d0d6debdb 100644
--- a/include/asm-m68k/page.h
+++ b/include/asm-m68k/page.h
@@ -27,6 +27,8 @@
#ifndef __ASSEMBLY__
+#include <asm/module.h>
+
#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
#define free_user_page(page, addr) free_page(addr)
@@ -114,18 +116,33 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#ifndef __ASSEMBLY__
+extern unsigned long m68k_memoffset;
+
#ifndef CONFIG_SUN3
#define WANT_PAGE_VIRTUAL
-#ifdef CONFIG_SINGLE_MEMORY_CHUNK
-extern unsigned long m68k_memoffset;
-#define __pa(vaddr) ((unsigned long)(vaddr)+m68k_memoffset)
-#define __va(paddr) ((void *)((unsigned long)(paddr)-m68k_memoffset))
-#else
-#define __pa(vaddr) virt_to_phys((void *)(vaddr))
-#define __va(paddr) phys_to_virt((unsigned long)(paddr))
-#endif
+static inline unsigned long ___pa(void *vaddr)
+{
+ unsigned long paddr;
+ asm (
+ "1: addl #0,%0\n"
+ m68k_fixup(%c2, 1b+2)
+ : "=r" (paddr)
+ : "0" (vaddr), "i" (m68k_fixup_memoffset));
+ return paddr;
+}
+#define __pa(vaddr) ___pa((void *)(vaddr))
+static inline void *__va(unsigned long paddr)
+{
+ void *vaddr;
+ asm (
+ "1: subl #0,%0\n"
+ m68k_fixup(%c2, 1b+2)
+ : "=r" (vaddr)
+ : "0" (paddr), "i" (m68k_fixup_memoffset));
+ return vaddr;
+}
#else /* !CONFIG_SUN3 */
/* This #define is a horrible hack to suppress lots of warnings. --m */
@@ -161,11 +178,47 @@ static inline void *__va(unsigned long x)
#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
-#define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr)-PAGE_OFFSET) >> PAGE_SHIFT))
-#define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
+extern int m68k_virt_to_node_shift;
+
+#ifdef CONFIG_SINGLE_MEMORY_CHUNK
+#define __virt_to_node(addr) (&pg_data_map[0])
+#else
+extern struct pglist_data *pg_data_table[];
+
+static inline __attribute_const__ int __virt_to_node_shift(void)
+{
+ int shift;
+
+ asm (
+ "1: moveq #0,%0\n"
+ m68k_fixup(%c1, 1b)
+ : "=d" (shift)
+ : "i" (m68k_fixup_vnode_shift));
+ return shift;
+}
+
+#define __virt_to_node(addr) (pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()])
+#endif
-#define pfn_to_page(pfn) virt_to_page(pfn_to_virt(pfn))
-#define page_to_pfn(page) virt_to_pfn(page_to_virt(page))
+#define virt_to_page(addr) ({ \
+ pfn_to_page(virt_to_pfn(addr)); \
+})
+#define page_to_virt(page) ({ \
+ pfn_to_virt(page_to_pfn(page)); \
+})
+
+#define pfn_to_page(pfn) ({ \
+ unsigned long __pfn = (pfn); \
+ struct pglist_data *pgdat; \
+ pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn)); \
+ pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \
+})
+#define page_to_pfn(_page) ({ \
+ struct page *__p = (_page); \
+ struct pglist_data *pgdat; \
+ pgdat = &pg_data_map[page_to_nid(__p)]; \
+ ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \
+})
#define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory)
#define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn))
diff --git a/include/asm-m68k/pgalloc.h b/include/asm-m68k/pgalloc.h
index a9cfb4b99d88..4cb1a57ab763 100644
--- a/include/asm-m68k/pgalloc.h
+++ b/include/asm-m68k/pgalloc.h
@@ -8,11 +8,12 @@
#include <asm/virtconvert.h>
-
#ifdef CONFIG_SUN3
#include <asm/sun3_pgalloc.h>
#else
#include <asm/motorola_pgalloc.h>
#endif
+extern void m68k_setup_node(int node);
+
#endif /* M68K_PGALLOC_H */
diff --git a/include/asm-m68k/pgtable.h b/include/asm-m68k/pgtable.h
index 555b87a1f7e3..778a4c538eb2 100644
--- a/include/asm-m68k/pgtable.h
+++ b/include/asm-m68k/pgtable.h
@@ -107,22 +107,7 @@ extern void *empty_zero_page;
/* 64-bit machines, beware! SRB. */
#define SIZEOF_PTR_LOG2 2
-/*
- * Check if the addr/len goes up to the end of a physical
- * memory chunk. Used for DMA functions.
- */
-#ifdef CONFIG_SINGLE_MEMORY_CHUNK
-/*
- * It makes no sense to consider whether we cross a memory boundary if
- * we support just one physical chunk of memory.
- */
-static inline int mm_end_of_chunk(unsigned long addr, int len)
-{
- return 0;
-}
-#else
-int mm_end_of_chunk (unsigned long addr, int len);
-#endif
+#define mm_end_of_chunk(addr, len) 0
extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode);
diff --git a/include/asm-m68k/sun3_pgtable.h b/include/asm-m68k/sun3_pgtable.h
index 5156a28a18d8..b9e62c1e7ae3 100644
--- a/include/asm-m68k/sun3_pgtable.h
+++ b/include/asm-m68k/sun3_pgtable.h
@@ -132,8 +132,8 @@ static inline void pte_clear (struct mm_struct *mm, unsigned long addr, pte_t *p
#define pfn_pte(pfn, pgprot) \
({ pte_t __pte; pte_val(__pte) = pfn | pgprot_val(pgprot); __pte; })
-#define pte_page(pte) (mem_map+((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT))
-#define pmd_page(pmd) (mem_map+((__pmd_page(pmd) - PAGE_OFFSET) >> PAGE_SHIFT))
+#define pte_page(pte) virt_to_page(__pte_page(pte))
+#define pmd_page(pmd) virt_to_page(__pmd_page(pmd))
static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); }
diff --git a/include/asm-m68k/virtconvert.h b/include/asm-m68k/virtconvert.h
index 83a87c9b1a16..dea32fbc7e51 100644
--- a/include/asm-m68k/virtconvert.h
+++ b/include/asm-m68k/virtconvert.h
@@ -8,56 +8,35 @@
#ifdef __KERNEL__
#include <linux/compiler.h>
+#include <linux/mmzone.h>
#include <asm/setup.h>
#include <asm/page.h>
-#ifdef CONFIG_AMIGA
-#include <asm/amigahw.h>
-#endif
-
/*
* Change virtual addresses to physical addresses and vv.
*/
-#ifndef CONFIG_SUN3
-extern unsigned long mm_vtop(unsigned long addr) __attribute_const__;
-extern unsigned long mm_ptov(unsigned long addr) __attribute_const__;
-#else
-static inline unsigned long mm_vtop(unsigned long vaddr)
-{
- return __pa(vaddr);
-}
-
-static inline unsigned long mm_ptov(unsigned long paddr)
-{
- return (unsigned long)__va(paddr);
-}
-#endif
-
-#ifdef CONFIG_SINGLE_MEMORY_CHUNK
-static inline unsigned long virt_to_phys(void *vaddr)
-{
- return (unsigned long)vaddr - PAGE_OFFSET + m68k_memory[0].addr;
-}
-
-static inline void * phys_to_virt(unsigned long paddr)
-{
- return (void *)(paddr - m68k_memory[0].addr + PAGE_OFFSET);
-}
-#else
static inline unsigned long virt_to_phys(void *address)
{
- return mm_vtop((unsigned long)address);
+ return __pa(address);
}
static inline void *phys_to_virt(unsigned long address)
{
- return (void *) mm_ptov(address);
+ return __va(address);
}
-#endif
/* Permanent address of a page. */
-#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
-#define page_to_phys(page) virt_to_phys((void *)__page_address(page))
+#ifdef CONFIG_SINGLE_MEMORY_CHUNK
+#define page_to_phys(page) \
+ __pa(PAGE_OFFSET + (((page) - pg_data_map[0].node_mem_map) << PAGE_SHIFT))
+#else
+#define page_to_phys(_page) ({ \
+ struct page *__page = _page; \
+ struct pglist_data *pgdat; \
+ pgdat = pg_data_table[page_to_nid(__page)]; \
+ page_to_pfn(__page) << PAGE_SHIFT; \
+})
+#endif
/*
* IO bus memory addresses are 1:1 with the physical address,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d8970623c566..bd8e33582d25 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2689,7 +2689,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
map = alloc_bootmem_node(pgdat, size);
pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
}
-#ifdef CONFIG_FLATMEM
+#ifndef CONFIG_NEED_MULTIPLE_NODES
/*
* With no DISCONTIG, the global mem_map is just set as node 0's
*/
diff --git a/mm/slub.c b/mm/slub.c
index 3e5aefcb4075..238c5a6ee896 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2435,6 +2435,7 @@ void __init kmem_cache_init(void)
*/
create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
sizeof(struct kmem_cache_node), GFP_KERNEL);
+ kmalloc_caches[0].refcount = -1;
#endif
/* Able to allocate the per node structures */
@@ -2482,6 +2483,12 @@ static int slab_unmergeable(struct kmem_cache *s)
if (s->ctor)
return 1;
+ /*
+ * We may have set a slab to be unmergeable during bootstrap.
+ */
+ if (s->refcount < 0)
+ return 1;
+
return 0;
}