Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/8253pit.h | 7
-rw-r--r--  arch/alpha/include/asm/kmap_types.h | 24
-rw-r--r--  arch/alpha/kernel/init_task.c | 3
-rw-r--r--  arch/alpha/kernel/irq_alpha.c | 2
-rw-r--r--  arch/alpha/kernel/irq_i8259.c | 2
-rw-r--r--  arch/alpha/kernel/irq_impl.h | 2
-rw-r--r--  arch/alpha/kernel/irq_pyxis.c | 2
-rw-r--r--  arch/alpha/kernel/irq_srm.c | 2
-rw-r--r--  arch/alpha/kernel/setup.c | 6
-rw-r--r--  arch/alpha/kernel/sys_alcor.c | 2
-rw-r--r--  arch/alpha/kernel/sys_cabriolet.c | 2
-rw-r--r--  arch/alpha/kernel/sys_dp264.c | 6
-rw-r--r--  arch/alpha/kernel/sys_eb64p.c | 2
-rw-r--r--  arch/alpha/kernel/sys_eiger.c | 2
-rw-r--r--  arch/alpha/kernel/sys_jensen.c | 2
-rw-r--r--  arch/alpha/kernel/sys_marvel.c | 10
-rw-r--r--  arch/alpha/kernel/sys_mikasa.c | 2
-rw-r--r--  arch/alpha/kernel/sys_noritake.c | 2
-rw-r--r--  arch/alpha/kernel/sys_rawhide.c | 2
-rw-r--r--  arch/alpha/kernel/sys_ruffian.c | 1
-rw-r--r--  arch/alpha/kernel/sys_rx164.c | 2
-rw-r--r--  arch/alpha/kernel/sys_sable.c | 2
-rw-r--r--  arch/alpha/kernel/sys_takara.c | 2
-rw-r--r--  arch/alpha/kernel/sys_titan.c | 4
-rw-r--r--  arch/alpha/kernel/sys_wildfire.c | 2
-rw-r--r--  arch/alpha/mm/numa.c | 6
-rw-r--r--  arch/arm/kernel/init_task.c | 4
-rw-r--r--  arch/arm/mach-pxa/pcm990-baseboard.c | 23
-rw-r--r--  arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h | 50
-rw-r--r--  arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h | 377
-rw-r--r--  arch/avr32/kernel/init_task.c | 4
-rw-r--r--  arch/blackfin/Kconfig | 60
-rw-r--r--  arch/blackfin/Makefile | 7
-rw-r--r--  arch/blackfin/boot/.gitignore | 3
-rw-r--r--  arch/blackfin/boot/Makefile | 31
-rw-r--r--  arch/blackfin/include/asm/atomic.h | 16
-rw-r--r--  arch/blackfin/include/asm/bfin-global.h | 11
-rw-r--r--  arch/blackfin/include/asm/bitops.h | 3
-rw-r--r--  arch/blackfin/include/asm/bug.h | 57
-rw-r--r--  arch/blackfin/include/asm/cache.h | 11
-rw-r--r--  arch/blackfin/include/asm/cacheflush.h | 3
-rw-r--r--  arch/blackfin/include/asm/cpu.h | 1
-rw-r--r--  arch/blackfin/include/asm/ftrace.h | 14
-rw-r--r--  arch/blackfin/include/asm/ipipe.h | 28
-rw-r--r--  arch/blackfin/include/asm/irq.h | 271
-rw-r--r--  arch/blackfin/include/asm/irqflags.h | 63
-rw-r--r--  arch/blackfin/include/asm/kmap_types.h | 17
-rw-r--r--  arch/blackfin/include/asm/mutex-dec.h | 112
-rw-r--r--  arch/blackfin/include/asm/sections.h | 11
-rw-r--r--  arch/blackfin/include/asm/system.h | 4
-rw-r--r--  arch/blackfin/include/asm/unistd.h | 3
-rw-r--r--  arch/blackfin/kernel/Makefile | 5
-rw-r--r--  arch/blackfin/kernel/bfin_dma_5xx.c | 4
-rw-r--r--  arch/blackfin/kernel/bfin_ksyms.c | 5
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbmgr.c | 2
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbmgr.c | 54
-rw-r--r--  arch/blackfin/kernel/early_printk.c | 8
-rw-r--r--  arch/blackfin/kernel/ftrace-entry.S | 140
-rw-r--r--  arch/blackfin/kernel/ftrace.c | 42
-rw-r--r--  arch/blackfin/kernel/init_task.c | 4
-rw-r--r--  arch/blackfin/kernel/ipipe.c | 42
-rw-r--r--  arch/blackfin/kernel/setup.c | 16
-rw-r--r--  arch/blackfin/kernel/stacktrace.c | 53
-rw-r--r--  arch/blackfin/kernel/traps.c | 128
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S | 17
-rw-r--r--  arch/blackfin/lib/checksum.c | 2
-rw-r--r--  arch/blackfin/mach-bf518/boards/ezbrd.c | 12
-rw-r--r--  arch/blackfin/mach-bf527/boards/cm_bf527.c | 5
-rw-r--r--  arch/blackfin/mach-bf527/boards/ezbrd.c | 5
-rw-r--r--  arch/blackfin/mach-bf527/boards/ezkit.c | 5
-rw-r--r--  arch/blackfin/mach-bf533/boards/H8606.c | 5
-rw-r--r--  arch/blackfin/mach-bf533/boards/blackstamp.c | 5
-rw-r--r--  arch/blackfin/mach-bf533/boards/cm_bf533.c | 5
-rw-r--r--  arch/blackfin/mach-bf533/boards/ezkit.c | 5
-rw-r--r--  arch/blackfin/mach-bf533/boards/stamp.c | 5
-rw-r--r--  arch/blackfin/mach-bf537/boards/cm_bf537.c | 7
-rw-r--r--  arch/blackfin/mach-bf537/boards/minotaur.c | 5
-rw-r--r--  arch/blackfin/mach-bf537/boards/pnav10.c | 7
-rw-r--r--  arch/blackfin/mach-bf537/boards/stamp.c | 2
-rw-r--r--  arch/blackfin/mach-bf537/boards/tcm_bf537.c | 5
-rw-r--r--  arch/blackfin/mach-bf538/boards/ezkit.c | 10
-rw-r--r--  arch/blackfin/mach-bf548/boards/cm_bf548.c | 10
-rw-r--r--  arch/blackfin/mach-bf548/boards/ezkit.c | 23
-rw-r--r--  arch/blackfin/mach-bf561/boards/cm_bf561.c | 7
-rw-r--r--  arch/blackfin/mach-bf561/boards/ezkit.c | 5
-rw-r--r--  arch/blackfin/mach-common/cache-c.c | 14
-rw-r--r--  arch/blackfin/mach-common/entry.S | 12
-rw-r--r--  arch/blackfin/mach-common/smp.c | 24
-rw-r--r--  arch/cris/include/asm/kmap_types.h | 17
-rw-r--r--  arch/cris/kernel/process.c | 4
-rw-r--r--  arch/frv/kernel/init_task.c | 4
-rw-r--r--  arch/h8300/include/asm/kmap_types.h | 17
-rw-r--r--  arch/h8300/kernel/init_task.c | 4
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c | 2
-rw-r--r--  arch/ia64/include/asm/kmap_types.h | 24
-rw-r--r--  arch/ia64/kernel/init_task.c | 4
-rw-r--r--  arch/ia64/kernel/mca.c | 3
-rw-r--r--  arch/ia64/kernel/paravirt_patchlist.c | 2
-rw-r--r--  arch/ia64/kernel/perfmon.c | 2
-rw-r--r--  arch/ia64/kernel/uncached.c | 3
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c | 3
-rw-r--r--  arch/m32r/include/asm/kmap_types.h | 23
-rw-r--r--  arch/m32r/kernel/init_task.c | 4
-rw-r--r--  arch/m32r/mm/discontig.c | 6
-rw-r--r--  arch/m32r/platforms/m32104ut/setup.c | 2
-rw-r--r--  arch/m32r/platforms/m32700ut/setup.c | 8
-rw-r--r--  arch/m32r/platforms/mappi/setup.c | 2
-rw-r--r--  arch/m32r/platforms/mappi2/setup.c | 2
-rw-r--r--  arch/m32r/platforms/mappi3/setup.c | 2
-rw-r--r--  arch/m32r/platforms/oaks32r/setup.c | 2
-rw-r--r--  arch/m32r/platforms/opsput/setup.c | 8
-rw-r--r--  arch/m32r/platforms/usrv/setup.c | 4
-rw-r--r--  arch/m68k/include/asm/kmap_types.h | 17
-rw-r--r--  arch/m68k/kernel/process.c | 4
-rw-r--r--  arch/m68knommu/kernel/init_task.c | 4
-rw-r--r--  arch/microblaze/include/asm/kmap_types.h | 25
-rw-r--r--  arch/mips/configs/bigsur_defconfig | 2
-rw-r--r--  arch/mips/configs/mtx1_defconfig | 2
-rw-r--r--  arch/mips/include/asm/i8253.h | 2
-rw-r--r--  arch/mips/include/asm/kmap_types.h | 24
-rw-r--r--  arch/mips/kernel/init_task.c | 4
-rw-r--r--  arch/mips/sni/eisa.c | 2
-rw-r--r--  arch/mn10300/include/asm/kmap_types.h | 27
-rw-r--r--  arch/mn10300/kernel/init_task.c | 3
-rw-r--r--  arch/parisc/include/asm/kmap_types.h | 24
-rw-r--r--  arch/parisc/kernel/init_task.c | 4
-rw-r--r--  arch/powerpc/Kconfig | 5
-rw-r--r--  arch/powerpc/boot/install.sh | 3
-rw-r--r--  arch/powerpc/configs/ppc6xx_defconfig | 2
-rw-r--r--  arch/powerpc/include/asm/8253pit.h | 7
-rw-r--r--  arch/powerpc/include/asm/atomic.h | 3
-rw-r--r--  arch/powerpc/include/asm/hw_irq.h | 5
-rw-r--r--  arch/powerpc/include/asm/iommu.h | 10
-rw-r--r--  arch/powerpc/include/asm/ps3.h | 18
-rw-r--r--  arch/powerpc/include/asm/ps3gpu.h | 86
-rw-r--r--  arch/powerpc/include/asm/reg.h | 4
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 1
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 3
-rw-r--r--  arch/powerpc/kernel/Makefile | 2
-rw-r--r--  arch/powerpc/kernel/init_task.c | 4
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 3
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 4
-rw-r--r--  arch/powerpc/kernel/time.c | 10
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 37
-rw-r--r--  arch/powerpc/platforms/cell/ras.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c | 11
-rw-r--r--  arch/powerpc/platforms/iseries/dt.c | 3
-rw-r--r--  arch/powerpc/platforms/iseries/mf.c | 3
-rw-r--r--  arch/powerpc/platforms/ps3/mm.c | 12
-rw-r--r--  arch/powerpc/platforms/ps3/os-area.c | 142
-rw-r--r--  arch/powerpc/platforms/ps3/platform.h | 10
-rw-r--r--  arch/powerpc/platforms/ps3/setup.c | 1
-rw-r--r--  arch/powerpc/platforms/ps3/system-bus.c | 15
-rw-r--r--  arch/s390/Kconfig | 9
-rw-r--r--  arch/s390/Makefile | 4
-rw-r--r--  arch/s390/appldata/appldata_base.c | 119
-rw-r--r--  arch/s390/include/asm/ccwdev.h | 19
-rw-r--r--  arch/s390/include/asm/ccwgroup.h | 10
-rw-r--r--  arch/s390/include/asm/kmap_types.h | 17
-rw-r--r--  arch/s390/include/asm/suspend.h | 10
-rw-r--r--  arch/s390/include/asm/system.h | 22
-rw-r--r--  arch/s390/kernel/early.c | 6
-rw-r--r--  arch/s390/kernel/init_task.c | 4
-rw-r--r--  arch/s390/kernel/mem_detect.c | 19
-rw-r--r--  arch/s390/kernel/smp.c | 38
-rw-r--r--  arch/s390/mm/pgtable.c | 19
-rw-r--r--  arch/s390/power/Makefile | 8
-rw-r--r--  arch/s390/power/suspend.c | 40
-rw-r--r--  arch/s390/power/swsusp.c | 30
-rw-r--r--  arch/s390/power/swsusp_64.c | 17
-rw-r--r--  arch/s390/power/swsusp_asm64.S | 199
-rw-r--r--  arch/sh/include/asm/kmap_types.h | 24
-rw-r--r--  arch/sh/kernel/init_task.c | 3
-rw-r--r--  arch/sparc/Kconfig | 5
-rw-r--r--  arch/sparc/configs/sparc64_defconfig | 63
-rw-r--r--  arch/sparc/include/asm/cpudata_64.h | 197
-rw-r--r--  arch/sparc/include/asm/dma-mapping.h | 168
-rw-r--r--  arch/sparc/include/asm/dma-mapping_32.h | 60
-rw-r--r--  arch/sparc/include/asm/dma-mapping_64.h | 154
-rw-r--r--  arch/sparc/include/asm/ftrace.h | 11
-rw-r--r--  arch/sparc/include/asm/kmap_types.h | 17
-rw-r--r--  arch/sparc/include/asm/mdesc.h | 3
-rw-r--r--  arch/sparc/include/asm/percpu_64.h | 8
-rw-r--r--  arch/sparc/include/asm/prom.h | 2
-rw-r--r--  arch/sparc/include/asm/trap_block.h | 207
-rw-r--r--  arch/sparc/include/asm/unistd.h | 3
-rw-r--r--  arch/sparc/kernel/Makefile | 2
-rw-r--r--  arch/sparc/kernel/cpumap.c | 431
-rw-r--r--  arch/sparc/kernel/cpumap.h | 16
-rw-r--r--  arch/sparc/kernel/dma.c | 127
-rw-r--r--  arch/sparc/kernel/ds.c | 3
-rw-r--r--  arch/sparc/kernel/ftrace.c | 47
-rw-r--r--  arch/sparc/kernel/head_64.S | 22
-rw-r--r--  arch/sparc/kernel/init_task.c | 3
-rw-r--r--  arch/sparc/kernel/iommu.c | 15
-rw-r--r--  arch/sparc/kernel/irq_64.c | 29
-rw-r--r--  arch/sparc/kernel/mdesc.c | 149
-rw-r--r--  arch/sparc/kernel/of_device_32.c | 195
-rw-r--r--  arch/sparc/kernel/of_device_64.c | 188
-rw-r--r--  arch/sparc/kernel/of_device_common.c | 174
-rw-r--r--  arch/sparc/kernel/of_device_common.h | 36
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c | 15
-rw-r--r--  arch/sparc/kernel/prom.h | 1
-rw-r--r--  arch/sparc/kernel/prom_64.c | 232
-rw-r--r--  arch/sparc/kernel/prom_common.c | 2
-rw-r--r--  arch/sparc/kernel/smp_64.c | 196
-rw-r--r--  arch/sparc/kernel/systbls_32.S | 4
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 6
-rw-r--r--  arch/sparc/kernel/traps_64.c | 170
-rw-r--r--  arch/sparc/mm/init_32.c | 1
-rw-r--r--  arch/sparc/mm/init_64.c | 16
-rw-r--r--  arch/sparc/mm/srmmu.c | 3
-rw-r--r--  arch/um/drivers/net_kern.c | 4
-rw-r--r--  arch/um/drivers/ubd_kern.c | 4
-rw-r--r--  arch/um/include/shared/init.h | 2
-rw-r--r--  arch/um/include/shared/net_user.h | 2
-rw-r--r--  arch/um/kernel/init_task.c | 3
-rw-r--r--  arch/um/kernel/irq.c | 6
-rw-r--r--  arch/um/sys-i386/stub.S | 2
-rw-r--r--  arch/um/sys-x86_64/asm/elf.h | 44
-rw-r--r--  arch/um/sys-x86_64/stub.S | 2
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/Makefile | 5
-rw-r--r--  arch/x86/include/asm/dma-mapping.h | 7
-rw-r--r--  arch/x86/include/asm/kmap_types.h | 23
-rw-r--r--  arch/x86/include/asm/kmemcheck.h | 42
-rw-r--r--  arch/x86/include/asm/pgtable.h | 5
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 9
-rw-r--r--  arch/x86/include/asm/string_32.h | 8
-rw-r--r--  arch/x86/include/asm/string_64.h | 8
-rw-r--r--  arch/x86/include/asm/thread_info.h | 4
-rw-r--r--  arch/x86/include/asm/timex.h | 4
-rw-r--r--  arch/x86/include/asm/xor.h | 5
-rw-r--r--  arch/x86/kernel/cpu/common.c | 11
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 23
-rw-r--r--  arch/x86/kernel/cpuid.c | 6
-rw-r--r--  arch/x86/kernel/i8253.c | 1
-rw-r--r--  arch/x86/kernel/init_task.c | 1
-rw-r--r--  arch/x86/kernel/microcode_core.c | 1
-rw-r--r--  arch/x86/kernel/msr.c | 6
-rw-r--r--  arch/x86/kernel/process.c | 2
-rw-r--r--  arch/x86/kernel/stacktrace.c | 7
-rw-r--r--  arch/x86/kernel/traps.c | 5
-rw-r--r--  arch/x86/kernel/tsc.c | 1
-rw-r--r--  arch/x86/kvm/vmx.c | 2
-rw-r--r--  arch/x86/mm/Makefile | 2
-rw-r--r--  arch/x86/mm/fault.c | 18
-rw-r--r--  arch/x86/mm/init.c | 2
-rw-r--r--  arch/x86/mm/init_32.c | 2
-rw-r--r--  arch/x86/mm/init_64.c | 4
-rw-r--r--  arch/x86/mm/kmemcheck/Makefile | 1
-rw-r--r--  arch/x86/mm/kmemcheck/error.c | 228
-rw-r--r--  arch/x86/mm/kmemcheck/error.h | 15
-rw-r--r--  arch/x86/mm/kmemcheck/kmemcheck.c | 640
-rw-r--r--  arch/x86/mm/kmemcheck/opcode.c | 106
-rw-r--r--  arch/x86/mm/kmemcheck/opcode.h | 9
-rw-r--r--  arch/x86/mm/kmemcheck/pte.c | 22
-rw-r--r--  arch/x86/mm/kmemcheck/pte.h | 10
-rw-r--r--  arch/x86/mm/kmemcheck/selftest.c | 69
-rw-r--r--  arch/x86/mm/kmemcheck/selftest.h | 6
-rw-r--r--  arch/x86/mm/kmemcheck/shadow.c | 162
-rw-r--r--  arch/x86/mm/kmemcheck/shadow.h | 16
-rw-r--r--  arch/x86/mm/pageattr.c | 2
-rw-r--r--  arch/x86/mm/pgtable.c | 12
-rw-r--r--  arch/xtensa/include/asm/kmap_types.h | 27
-rw-r--r--  arch/xtensa/kernel/init_task.c | 4
267 files changed, 5304 insertions, 2605 deletions
diff --git a/arch/alpha/include/asm/8253pit.h b/arch/alpha/include/asm/8253pit.h
index fef5c1450e47..a71c9c1455a7 100644
--- a/arch/alpha/include/asm/8253pit.h
+++ b/arch/alpha/include/asm/8253pit.h
@@ -1,10 +1,3 @@
/*
* 8253/8254 Programmable Interval Timer
*/
-
-#ifndef _8253PIT_H
-#define _8253PIT_H
-
-#define PIT_TICK_RATE 1193180UL
-
-#endif
diff --git a/arch/alpha/include/asm/kmap_types.h b/arch/alpha/include/asm/kmap_types.h
index 3e6735a34c57..a8d4ec8ea4b6 100644
--- a/arch/alpha/include/asm/kmap_types.h
+++ b/arch/alpha/include/asm/kmap_types.h
@@ -3,30 +3,12 @@
/* Dummy header just to define km_type. */
-
#ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define __WITH_KM_FENCE
#endif
-enum km_type {
-D(0) KM_BOUNCE_READ,
-D(1) KM_SKB_SUNRPC_DATA,
-D(2) KM_SKB_DATA_SOFTIRQ,
-D(3) KM_USER0,
-D(4) KM_USER1,
-D(5) KM_BIO_SRC_IRQ,
-D(6) KM_BIO_DST_IRQ,
-D(7) KM_PTE0,
-D(8) KM_PTE1,
-D(9) KM_IRQ0,
-D(10) KM_IRQ1,
-D(11) KM_SOFTIRQ0,
-D(12) KM_SOFTIRQ1,
-D(13) KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
-#undef D
+#undef __WITH_KM_FENCE
#endif
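
The same conversion is applied to every architecture's kmap_types.h in this series: the per-arch enum is dropped and the slots come from asm-generic/kmap_types.h, with __WITH_KM_FENCE opting into the debug fence entries. A minimal sketch of the pattern the generic header follows (illustrative, not the verbatim file):

/* Illustrative sketch of the asm-generic scheme, assuming the usual
 * KMAP_D() helper; see include/asm-generic/kmap_types.h for the real one. */
#ifdef __WITH_KM_FENCE
# define KMAP_D(n) __KM_FENCE_##n ,
#else
# define KMAP_D(n)
#endif

enum km_type {
KMAP_D(0)	KM_BOUNCE_READ,
KMAP_D(1)	KM_SKB_SUNRPC_DATA,
	/* ... remaining slots continue as in the old per-arch enums ... */
	KM_TYPE_NR
};

#undef KMAP_D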
diff --git a/arch/alpha/kernel/init_task.c b/arch/alpha/kernel/init_task.c
index c2938e574a40..19b86328ffd7 100644
--- a/arch/alpha/kernel/init_task.c
+++ b/arch/alpha/kernel/init_task.c
@@ -10,10 +10,7 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_mm);
EXPORT_SYMBOL(init_task);
union thread_union init_thread_union
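
The init_mm definition and its export disappear here (and from every other arch init_task.c in this diff) because the definition is consolidated into a single generic file shared by all architectures. A sketch of what that shared definition amounts to, assuming the INIT_MM() initializer from <linux/init_task.h> remains available:

/* Shared definition (sketch): one init_mm for every architecture, so
 * each arch/.../init_task.c now only keeps init_task itself. */
#include <linux/mm_types.h>
#include <linux/init_task.h>

struct mm_struct init_mm = INIT_MM(init_mm);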
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 67c19f8a9944..38c805dfc544 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -227,7 +227,7 @@ struct irqaction timer_irqaction = {
.name = "timer",
};
-static struct hw_interrupt_type rtc_irq_type = {
+static struct irq_chip rtc_irq_type = {
.typename = "RTC",
.startup = rtc_startup,
.shutdown = rtc_enable_disable,
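
hw_interrupt_type is only a compatibility alias for struct irq_chip, so these Alpha conversions are mechanical renames. For reference, a sketch of the subset of struct irq_chip these drivers fill in (field list taken from the hunks; the real structure in <linux/irq.h> carries more callbacks):

/* Sketch of the irq_chip members used by the Alpha interrupt drivers;
 * other members of the real structure are omitted here. */
struct irq_chip {
	const char	*typename;	/* legacy name, e.g. "RTC", "XT-PIC" */
	unsigned int	(*startup)(unsigned int irq);
	void		(*shutdown)(unsigned int irq);
	void		(*enable)(unsigned int irq);
	void		(*disable)(unsigned int irq);
	void		(*ack)(unsigned int irq);
	void		(*end)(unsigned int irq);
	int		(*set_affinity)(unsigned int irq,
					const struct cpumask *dest);
	/* ... */
};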
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
index 9405bee9894e..50bfec9b588f 100644
--- a/arch/alpha/kernel/irq_i8259.c
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -83,7 +83,7 @@ i8259a_end_irq(unsigned int irq)
i8259a_enable_irq(irq);
}
-struct hw_interrupt_type i8259a_irq_type = {
+struct irq_chip i8259a_irq_type = {
.typename = "XT-PIC",
.startup = i8259a_startup_irq,
.shutdown = i8259a_disable_irq,
diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h
index cc9a8a7aa279..b63ccd7386f1 100644
--- a/arch/alpha/kernel/irq_impl.h
+++ b/arch/alpha/kernel/irq_impl.h
@@ -36,7 +36,7 @@ extern void i8259a_disable_irq(unsigned int);
extern void i8259a_mask_and_ack_irq(unsigned int);
extern unsigned int i8259a_startup_irq(unsigned int);
extern void i8259a_end_irq(unsigned int);
-extern struct hw_interrupt_type i8259a_irq_type;
+extern struct irq_chip i8259a_irq_type;
extern void init_i8259a_irqs(void);
extern void handle_irq(int irq);
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c
index d53edbccbfe5..69199a76ec4a 100644
--- a/arch/alpha/kernel/irq_pyxis.c
+++ b/arch/alpha/kernel/irq_pyxis.c
@@ -70,7 +70,7 @@ pyxis_mask_and_ack_irq(unsigned int irq)
*(vulp)PYXIS_INT_MASK;
}
-static struct hw_interrupt_type pyxis_irq_type = {
+static struct irq_chip pyxis_irq_type = {
.typename = "PYXIS",
.startup = pyxis_startup_irq,
.shutdown = pyxis_disable_irq,
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
index a03fbca4940e..85229369a1f8 100644
--- a/arch/alpha/kernel/irq_srm.c
+++ b/arch/alpha/kernel/irq_srm.c
@@ -48,7 +48,7 @@ srm_end_irq(unsigned int irq)
}
/* Handle interrupts from the SRM, assuming no additional weirdness. */
-static struct hw_interrupt_type srm_irq_type = {
+static struct irq_chip srm_irq_type = {
.typename = "SRM",
.startup = srm_startup_irq,
.shutdown = srm_disable_irq,
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 80df86cd746b..d2634e4476b4 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -252,9 +252,9 @@ reserve_std_resources(void)
}
#define PFN_MAX PFN_DOWN(0x80000000)
-#define for_each_mem_cluster(memdesc, cluster, i) \
- for ((cluster) = (memdesc)->cluster, (i) = 0; \
- (i) < (memdesc)->numclusters; (i)++, (cluster)++)
+#define for_each_mem_cluster(memdesc, _cluster, i) \
+ for ((_cluster) = (memdesc)->cluster, (i) = 0; \
+ (i) < (memdesc)->numclusters; (i)++, (_cluster)++)
static unsigned long __init
get_mem_size_limit(char *s)
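
The only change in this macro (here and in arch/alpha/mm/numa.c below) is renaming the parameter from cluster to _cluster. Because the preprocessor substitutes every occurrence of a parameter token, the old spelling also rewrote the member access (memdesc)->cluster whenever the caller's iterator was not literally named cluster. A minimal illustration, with hypothetical caller names:

/* Old form: the parameter name collides with the struct member. */
#define for_each_mem_cluster_old(memdesc, cluster, i)		\
	for ((cluster) = (memdesc)->cluster, (i) = 0;		\
	     (i) < (memdesc)->numclusters; (i)++, (cluster)++)

/* for_each_mem_cluster_old(memdesc, memc, i) expands to
 *	for ((memc) = (memdesc)->memc, ...)	-- wrong member access.
 * Renaming the parameter to _cluster leaves (memdesc)->cluster intact. */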
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index e53a1e1c2f21..382035ef7394 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -89,7 +89,7 @@ alcor_end_irq(unsigned int irq)
alcor_enable_irq(irq);
}
-static struct hw_interrupt_type alcor_irq_type = {
+static struct irq_chip alcor_irq_type = {
.typename = "ALCOR",
.startup = alcor_startup_irq,
.shutdown = alcor_disable_irq,
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index ace475c124f6..ed349436732b 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -71,7 +71,7 @@ cabriolet_end_irq(unsigned int irq)
cabriolet_enable_irq(irq);
}
-static struct hw_interrupt_type cabriolet_irq_type = {
+static struct irq_chip cabriolet_irq_type = {
.typename = "CABRIOLET",
.startup = cabriolet_startup_irq,
.shutdown = cabriolet_disable_irq,
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 5bd5259324b7..46e70ece5176 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -198,7 +198,7 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
return 0;
}
-static struct hw_interrupt_type dp264_irq_type = {
+static struct irq_chip dp264_irq_type = {
.typename = "DP264",
.startup = dp264_startup_irq,
.shutdown = dp264_disable_irq,
@@ -209,7 +209,7 @@ static struct hw_interrupt_type dp264_irq_type = {
.set_affinity = dp264_set_affinity,
};
-static struct hw_interrupt_type clipper_irq_type = {
+static struct irq_chip clipper_irq_type = {
.typename = "CLIPPER",
.startup = clipper_startup_irq,
.shutdown = clipper_disable_irq,
@@ -298,7 +298,7 @@ clipper_srm_device_interrupt(unsigned long vector)
}
static void __init
-init_tsunami_irqs(struct hw_interrupt_type * ops, int imin, int imax)
+init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
{
long i;
for (i = imin; i <= imax; ++i) {
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index 9c5a306dc0ee..660c23ef661f 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -69,7 +69,7 @@ eb64p_end_irq(unsigned int irq)
eb64p_enable_irq(irq);
}
-static struct hw_interrupt_type eb64p_irq_type = {
+static struct irq_chip eb64p_irq_type = {
.typename = "EB64P",
.startup = eb64p_startup_irq,
.shutdown = eb64p_disable_irq,
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index baf60f36cbd7..b99ea488d844 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -80,7 +80,7 @@ eiger_end_irq(unsigned int irq)
eiger_enable_irq(irq);
}
-static struct hw_interrupt_type eiger_irq_type = {
+static struct irq_chip eiger_irq_type = {
.typename = "EIGER",
.startup = eiger_startup_irq,
.shutdown = eiger_disable_irq,
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index 2b5caf3d9b15..ef0b83a070ac 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -118,7 +118,7 @@ jensen_local_end(unsigned int irq)
i8259a_end_irq(1);
}
-static struct hw_interrupt_type jensen_local_irq_type = {
+static struct irq_chip jensen_local_irq_type = {
.typename = "LOCAL",
.startup = jensen_local_startup,
.shutdown = jensen_local_shutdown,
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index c5a1a2438c67..bbfc4f20ca72 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -169,7 +169,7 @@ marvel_irq_noop_return(unsigned int irq)
return 0;
}
-static struct hw_interrupt_type marvel_legacy_irq_type = {
+static struct irq_chip marvel_legacy_irq_type = {
.typename = "LEGACY",
.startup = marvel_irq_noop_return,
.shutdown = marvel_irq_noop,
@@ -179,7 +179,7 @@ static struct hw_interrupt_type marvel_legacy_irq_type = {
.end = marvel_irq_noop,
};
-static struct hw_interrupt_type io7_lsi_irq_type = {
+static struct irq_chip io7_lsi_irq_type = {
.typename = "LSI",
.startup = io7_startup_irq,
.shutdown = io7_disable_irq,
@@ -189,7 +189,7 @@ static struct hw_interrupt_type io7_lsi_irq_type = {
.end = io7_end_irq,
};
-static struct hw_interrupt_type io7_msi_irq_type = {
+static struct irq_chip io7_msi_irq_type = {
.typename = "MSI",
.startup = io7_startup_irq,
.shutdown = io7_disable_irq,
@@ -273,8 +273,8 @@ init_one_io7_msi(struct io7 *io7, unsigned int which, unsigned int where)
static void __init
init_io7_irqs(struct io7 *io7,
- struct hw_interrupt_type *lsi_ops,
- struct hw_interrupt_type *msi_ops)
+ struct irq_chip *lsi_ops,
+ struct irq_chip *msi_ops)
{
long base = (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT) + 16;
long i;
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index 8d3e9429c5ee..4e366641a08e 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -68,7 +68,7 @@ mikasa_end_irq(unsigned int irq)
mikasa_enable_irq(irq);
}
-static struct hw_interrupt_type mikasa_irq_type = {
+static struct irq_chip mikasa_irq_type = {
.typename = "MIKASA",
.startup = mikasa_startup_irq,
.shutdown = mikasa_disable_irq,
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index 538876b62449..35753a173bac 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -73,7 +73,7 @@ noritake_end_irq(unsigned int irq)
noritake_enable_irq(irq);
}
-static struct hw_interrupt_type noritake_irq_type = {
+static struct irq_chip noritake_irq_type = {
.typename = "NORITAKE",
.startup = noritake_startup_irq,
.shutdown = noritake_disable_irq,
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index 672cb2df53df..f3aec7e085c8 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -135,7 +135,7 @@ rawhide_end_irq(unsigned int irq)
rawhide_enable_irq(irq);
}
-static struct hw_interrupt_type rawhide_irq_type = {
+static struct irq_chip rawhide_irq_type = {
.typename = "RAWHIDE",
.startup = rawhide_startup_irq,
.shutdown = rawhide_disable_irq,
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c
index f15a329b6011..d9f9cfeb9931 100644
--- a/arch/alpha/kernel/sys_ruffian.c
+++ b/arch/alpha/kernel/sys_ruffian.c
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/ioport.h>
+#include <linux/timex.h>
#include <linux/init.h>
#include <asm/ptrace.h>
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index ce1faa6f1df1..fc9246373452 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -72,7 +72,7 @@ rx164_end_irq(unsigned int irq)
rx164_enable_irq(irq);
}
-static struct hw_interrupt_type rx164_irq_type = {
+static struct irq_chip rx164_irq_type = {
.typename = "RX164",
.startup = rx164_startup_irq,
.shutdown = rx164_disable_irq,
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 9e263256a42d..426eb6906d01 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -501,7 +501,7 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
spin_unlock(&sable_lynx_irq_lock);
}
-static struct hw_interrupt_type sable_lynx_irq_type = {
+static struct irq_chip sable_lynx_irq_type = {
.typename = "SABLE/LYNX",
.startup = sable_lynx_startup_irq,
.shutdown = sable_lynx_disable_irq,
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index 9bd9a31450c6..830318c21661 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -74,7 +74,7 @@ takara_end_irq(unsigned int irq)
takara_enable_irq(irq);
}
-static struct hw_interrupt_type takara_irq_type = {
+static struct irq_chip takara_irq_type = {
.typename = "TAKARA",
.startup = takara_startup_irq,
.shutdown = takara_disable_irq,
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 8dd239ebdb9e..88978fc60f83 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -185,7 +185,7 @@ titan_srm_device_interrupt(unsigned long vector)
static void __init
-init_titan_irqs(struct hw_interrupt_type * ops, int imin, int imax)
+init_titan_irqs(struct irq_chip * ops, int imin, int imax)
{
long i;
for (i = imin; i <= imax; ++i) {
@@ -194,7 +194,7 @@ init_titan_irqs(struct hw_interrupt_type * ops, int imin, int imax)
}
}
-static struct hw_interrupt_type titan_irq_type = {
+static struct irq_chip titan_irq_type = {
.typename = "TITAN",
.startup = titan_startup_irq,
.shutdown = titan_disable_irq,
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index 42c3eede4d09..e91b4c3838a8 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -157,7 +157,7 @@ wildfire_end_irq(unsigned int irq)
wildfire_enable_irq(irq);
}
-static struct hw_interrupt_type wildfire_irq_type = {
+static struct irq_chip wildfire_irq_type = {
.typename = "WILDFIRE",
.startup = wildfire_startup_irq,
.shutdown = wildfire_disable_irq,
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index a13de49d1265..0eab55749423 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -28,9 +28,9 @@ EXPORT_SYMBOL(node_data);
#define DBGDCONT(args...)
#endif
-#define for_each_mem_cluster(memdesc, cluster, i) \
- for ((cluster) = (memdesc)->cluster, (i) = 0; \
- (i) < (memdesc)->numclusters; (i)++, (cluster)++)
+#define for_each_mem_cluster(memdesc, _cluster, i) \
+ for ((_cluster) = (memdesc)->cluster, (i) = 0; \
+ (i) < (memdesc)->numclusters; (i)++, (_cluster)++)
static void __init show_mem_layout(void)
{
diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c
index e859af349467..3f470866bb89 100644
--- a/arch/arm/kernel/init_task.c
+++ b/arch/arm/kernel/init_task.c
@@ -14,10 +14,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial thread structure.
*
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index 095521e9ee24..01791d74e08e 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -380,12 +380,12 @@ static struct pca953x_platform_data pca9536_data = {
.gpio_base = NR_BUILTIN_GPIO,
};
-static int gpio_bus_switch;
+static int gpio_bus_switch = -EINVAL;
static int pcm990_camera_set_bus_param(struct soc_camera_link *link,
- unsigned long flags)
+ unsigned long flags)
{
- if (gpio_bus_switch <= 0) {
+ if (gpio_bus_switch < 0) {
if (flags == SOCAM_DATAWIDTH_10)
return 0;
else
@@ -404,25 +404,34 @@ static unsigned long pcm990_camera_query_bus_param(struct soc_camera_link *link)
{
int ret;
- if (!gpio_bus_switch) {
+ if (gpio_bus_switch < 0) {
ret = gpio_request(NR_BUILTIN_GPIO, "camera");
if (!ret) {
gpio_bus_switch = NR_BUILTIN_GPIO;
gpio_direction_output(gpio_bus_switch, 0);
- } else
- gpio_bus_switch = -EINVAL;
+ }
}
- if (gpio_bus_switch > 0)
+ if (gpio_bus_switch >= 0)
return SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_10;
else
return SOCAM_DATAWIDTH_10;
}
+static void pcm990_camera_free_bus(struct soc_camera_link *link)
+{
+ if (gpio_bus_switch < 0)
+ return;
+
+ gpio_free(gpio_bus_switch);
+ gpio_bus_switch = -EINVAL;
+}
+
static struct soc_camera_link iclink = {
.bus_id = 0, /* Must match with the camera ID above */
.query_bus_param = pcm990_camera_query_bus_param,
.set_bus_param = pcm990_camera_set_bus_param,
+ .free_bus = pcm990_camera_free_bus,
};
/* Board I2C devices. */
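
The point of the baseboard change is the sentinel for gpio_bus_switch: GPIO 0 is a valid number, so initializing to 0 and testing with <= 0 or ! could misread a successfully requested GPIO. With -EINVAL as the "not requested" marker, request and release pair up cleanly; a condensed sketch of the corrected pattern (helper names invented for illustration):

static int gpio_bus_switch = -EINVAL;	/* negative: not requested yet */

static void example_claim_bus_switch(void)
{
	if (gpio_bus_switch < 0 &&
	    !gpio_request(NR_BUILTIN_GPIO, "camera")) {
		gpio_bus_switch = NR_BUILTIN_GPIO;
		gpio_direction_output(gpio_bus_switch, 0);
	}
}

static void example_release_bus_switch(void)
{
	if (gpio_bus_switch >= 0) {
		gpio_free(gpio_bus_switch);
		gpio_bus_switch = -EINVAL;
	}
}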
diff --git a/arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h
new file mode 100644
index 000000000000..36a85f5000c8
--- /dev/null
+++ b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h
@@ -0,0 +1,50 @@
+/* arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C - USB2.0 Highspeed/OtG device PHY registers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/* Note, this is a seperate header file as some of the clock framework
+ * needs to touch this if the clk_48m is used as the USB OHCI or other
+ * peripheral source.
+*/
+
+#ifndef __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H
+#define __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H __FILE__
+
+/* S3C64XX_PA_USB_HSPHY */
+
+#define S3C_HSOTG_PHYREG(x) ((x) + S3C_VA_USB_HSPHY)
+
+#define S3C_PHYPWR S3C_HSOTG_PHYREG(0x00)
+#define SRC_PHYPWR_OTG_DISABLE (1 << 4)
+#define SRC_PHYPWR_ANALOG_POWERDOWN (1 << 3)
+#define SRC_PHYPWR_FORCE_SUSPEND (1 << 1)
+
+#define S3C_PHYCLK S3C_HSOTG_PHYREG(0x04)
+#define S3C_PHYCLK_MODE_USB11 (1 << 6)
+#define S3C_PHYCLK_EXT_OSC (1 << 5)
+#define S3C_PHYCLK_CLK_FORCE (1 << 4)
+#define S3C_PHYCLK_ID_PULL (1 << 2)
+#define S3C_PHYCLK_CLKSEL_MASK (0x3 << 0)
+#define S3C_PHYCLK_CLKSEL_SHIFT (0)
+#define S3C_PHYCLK_CLKSEL_48M (0x0 << 0)
+#define S3C_PHYCLK_CLKSEL_12M (0x2 << 0)
+#define S3C_PHYCLK_CLKSEL_24M (0x3 << 0)
+
+#define S3C_RSTCON S3C_HSOTG_PHYREG(0x08)
+#define S3C_RSTCON_PHYCLK (1 << 2)
+#define S3C_RSTCON_HCLK (1 << 2)
+#define S3C_RSTCON_PHY (1 << 0)
+
+#define S3C_PHYTUNE S3C_HSOTG_PHYREG(0x20)
+
+#endif /* __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H */
diff --git a/arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h
new file mode 100644
index 000000000000..8d18d9d4d148
--- /dev/null
+++ b/arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h
@@ -0,0 +1,377 @@
+/* arch/arm/plat-s3c/include/plat/regs-usb-hsotg.h
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C - USB2.0 Highspeed/OtG device block registers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __PLAT_S3C64XX_REGS_USB_HSOTG_H
+#define __PLAT_S3C64XX_REGS_USB_HSOTG_H __FILE__
+
+#define S3C_HSOTG_REG(x) (x)
+
+#define S3C_GOTGCTL S3C_HSOTG_REG(0x000)
+#define S3C_GOTGCTL_BSESVLD (1 << 19)
+#define S3C_GOTGCTL_ASESVLD (1 << 18)
+#define S3C_GOTGCTL_DBNC_SHORT (1 << 17)
+#define S3C_GOTGCTL_CONID_B (1 << 16)
+#define S3C_GOTGCTL_DEVHNPEN (1 << 11)
+#define S3C_GOTGCTL_HSSETHNPEN (1 << 10)
+#define S3C_GOTGCTL_HNPREQ (1 << 9)
+#define S3C_GOTGCTL_HSTNEGSCS (1 << 8)
+#define S3C_GOTGCTL_SESREQ (1 << 1)
+#define S3C_GOTGCTL_SESREQSCS (1 << 0)
+
+#define S3C_GOTGINT S3C_HSOTG_REG(0x004)
+#define S3C_GOTGINT_DbnceDone (1 << 19)
+#define S3C_GOTGINT_ADevTOUTChg (1 << 18)
+#define S3C_GOTGINT_HstNegDet (1 << 17)
+#define S3C_GOTGINT_HstnegSucStsChng (1 << 9)
+#define S3C_GOTGINT_SesReqSucStsChng (1 << 8)
+#define S3C_GOTGINT_SesEndDet (1 << 2)
+
+#define S3C_GAHBCFG S3C_HSOTG_REG(0x008)
+#define S3C_GAHBCFG_PTxFEmpLvl (1 << 8)
+#define S3C_GAHBCFG_NPTxFEmpLvl (1 << 7)
+#define S3C_GAHBCFG_DMAEn (1 << 5)
+#define S3C_GAHBCFG_HBstLen_MASK (0xf << 1)
+#define S3C_GAHBCFG_HBstLen_SHIFT (1)
+#define S3C_GAHBCFG_HBstLen_Single (0x0 << 1)
+#define S3C_GAHBCFG_HBstLen_Incr (0x1 << 1)
+#define S3C_GAHBCFG_HBstLen_Incr4 (0x3 << 1)
+#define S3C_GAHBCFG_HBstLen_Incr8 (0x5 << 1)
+#define S3C_GAHBCFG_HBstLen_Incr16 (0x7 << 1)
+#define S3C_GAHBCFG_GlblIntrEn (1 << 0)
+
+#define S3C_GUSBCFG S3C_HSOTG_REG(0x00C)
+#define S3C_GUSBCFG_PHYLPClkSel (1 << 15)
+#define S3C_GUSBCFG_HNPCap (1 << 9)
+#define S3C_GUSBCFG_SRPCap (1 << 8)
+#define S3C_GUSBCFG_PHYIf16 (1 << 3)
+#define S3C_GUSBCFG_TOutCal_MASK (0x7 << 0)
+#define S3C_GUSBCFG_TOutCal_SHIFT (0)
+#define S3C_GUSBCFG_TOutCal_LIMIT (0x7)
+#define S3C_GUSBCFG_TOutCal(_x) ((_x) << 0)
+
+#define S3C_GRSTCTL S3C_HSOTG_REG(0x010)
+
+#define S3C_GRSTCTL_AHBIdle (1 << 31)
+#define S3C_GRSTCTL_DMAReq (1 << 30)
+#define S3C_GRSTCTL_TxFNum_MASK (0x1f << 6)
+#define S3C_GRSTCTL_TxFNum_SHIFT (6)
+#define S3C_GRSTCTL_TxFNum_LIMIT (0x1f)
+#define S3C_GRSTCTL_TxFNum(_x) ((_x) << 6)
+#define S3C_GRSTCTL_TxFFlsh (1 << 5)
+#define S3C_GRSTCTL_RxFFlsh (1 << 4)
+#define S3C_GRSTCTL_INTknQFlsh (1 << 3)
+#define S3C_GRSTCTL_FrmCntrRst (1 << 2)
+#define S3C_GRSTCTL_HSftRst (1 << 1)
+#define S3C_GRSTCTL_CSftRst (1 << 0)
+
+#define S3C_GINTSTS S3C_HSOTG_REG(0x014)
+#define S3C_GINTMSK S3C_HSOTG_REG(0x018)
+
+#define S3C_GINTSTS_WkUpInt (1 << 31)
+#define S3C_GINTSTS_SessReqInt (1 << 30)
+#define S3C_GINTSTS_DisconnInt (1 << 29)
+#define S3C_GINTSTS_ConIDStsChng (1 << 28)
+#define S3C_GINTSTS_PTxFEmp (1 << 26)
+#define S3C_GINTSTS_HChInt (1 << 25)
+#define S3C_GINTSTS_PrtInt (1 << 24)
+#define S3C_GINTSTS_FetSusp (1 << 22)
+#define S3C_GINTSTS_incompIP (1 << 21)
+#define S3C_GINTSTS_IncomplSOIN (1 << 20)
+#define S3C_GINTSTS_OEPInt (1 << 19)
+#define S3C_GINTSTS_IEPInt (1 << 18)
+#define S3C_GINTSTS_EPMis (1 << 17)
+#define S3C_GINTSTS_EOPF (1 << 15)
+#define S3C_GINTSTS_ISOutDrop (1 << 14)
+#define S3C_GINTSTS_EnumDone (1 << 13)
+#define S3C_GINTSTS_USBRst (1 << 12)
+#define S3C_GINTSTS_USBSusp (1 << 11)
+#define S3C_GINTSTS_ErlySusp (1 << 10)
+#define S3C_GINTSTS_GOUTNakEff (1 << 7)
+#define S3C_GINTSTS_GINNakEff (1 << 6)
+#define S3C_GINTSTS_NPTxFEmp (1 << 5)
+#define S3C_GINTSTS_RxFLvl (1 << 4)
+#define S3C_GINTSTS_SOF (1 << 3)
+#define S3C_GINTSTS_OTGInt (1 << 2)
+#define S3C_GINTSTS_ModeMis (1 << 1)
+#define S3C_GINTSTS_CurMod_Host (1 << 0)
+
+#define S3C_GRXSTSR S3C_HSOTG_REG(0x01C)
+#define S3C_GRXSTSP S3C_HSOTG_REG(0x020)
+
+#define S3C_GRXSTS_FN_MASK (0x7f << 25)
+#define S3C_GRXSTS_FN_SHIFT (25)
+
+#define S3C_GRXSTS_PktSts_MASK (0xf << 17)
+#define S3C_GRXSTS_PktSts_SHIFT (17)
+#define S3C_GRXSTS_PktSts_GlobalOutNAK (0x1 << 17)
+#define S3C_GRXSTS_PktSts_OutRX (0x2 << 17)
+#define S3C_GRXSTS_PktSts_OutDone (0x3 << 17)
+#define S3C_GRXSTS_PktSts_SetupDone (0x4 << 17)
+#define S3C_GRXSTS_PktSts_SetupRX (0x6 << 17)
+
+#define S3C_GRXSTS_DPID_MASK (0x3 << 15)
+#define S3C_GRXSTS_DPID_SHIFT (15)
+#define S3C_GRXSTS_ByteCnt_MASK (0x7ff << 4)
+#define S3C_GRXSTS_ByteCnt_SHIFT (4)
+#define S3C_GRXSTS_EPNum_MASK (0xf << 0)
+#define S3C_GRXSTS_EPNum_SHIFT (0)
+
+#define S3C_GRXFSIZ S3C_HSOTG_REG(0x024)
+
+#define S3C_GNPTXFSIZ S3C_HSOTG_REG(0x028)
+
+#define S3C_GNPTXFSIZ_NPTxFDep_MASK (0xffff << 16)
+#define S3C_GNPTXFSIZ_NPTxFDep_SHIFT (16)
+#define S3C_GNPTXFSIZ_NPTxFDep_LIMIT (0xffff)
+#define S3C_GNPTXFSIZ_NPTxFDep(_x) ((_x) << 16)
+#define S3C_GNPTXFSIZ_NPTxFStAddr_MASK (0xffff << 0)
+#define S3C_GNPTXFSIZ_NPTxFStAddr_SHIFT (0)
+#define S3C_GNPTXFSIZ_NPTxFStAddr_LIMIT (0xffff)
+#define S3C_GNPTXFSIZ_NPTxFStAddr(_x) ((_x) << 0)
+
+#define S3C_GNPTXSTS S3C_HSOTG_REG(0x02C)
+
+#define S3C_GNPTXSTS_NPtxQTop_MASK (0x7f << 24)
+#define S3C_GNPTXSTS_NPtxQTop_SHIFT (24)
+
+#define S3C_GNPTXSTS_NPTxQSpcAvail_MASK (0xff << 16)
+#define S3C_GNPTXSTS_NPTxQSpcAvail_SHIFT (16)
+#define S3C_GNPTXSTS_NPTxQSpcAvail_GET(_v) (((_v) >> 16) & 0xff)
+
+#define S3C_GNPTXSTS_NPTxFSpcAvail_MASK (0xffff << 0)
+#define S3C_GNPTXSTS_NPTxFSpcAvail_SHIFT (0)
+#define S3C_GNPTXSTS_NPTxFSpcAvail_GET(_v) (((_v) >> 0) & 0xffff)
+
+
+#define S3C_HPTXFSIZ S3C_HSOTG_REG(0x100)
+
+#define S3C_DPTXFSIZn(_a) S3C_HSOTG_REG(0x104 + (((_a) - 1) * 4))
+
+#define S3C_DPTXFSIZn_DPTxFSize_MASK (0xffff << 16)
+#define S3C_DPTXFSIZn_DPTxFSize_SHIFT (16)
+#define S3C_DPTXFSIZn_DPTxFSize_GET(_v) (((_v) >> 16) & 0xffff)
+#define S3C_DPTXFSIZn_DPTxFSize_LIMIT (0xffff)
+#define S3C_DPTXFSIZn_DPTxFSize(_x) ((_x) << 16)
+
+#define S3C_DPTXFSIZn_DPTxFStAddr_MASK (0xffff << 0)
+#define S3C_DPTXFSIZn_DPTxFStAddr_SHIFT (0)
+
+/* Device mode registers */
+#define S3C_DCFG S3C_HSOTG_REG(0x800)
+
+#define S3C_DCFG_EPMisCnt_MASK (0x1f << 18)
+#define S3C_DCFG_EPMisCnt_SHIFT (18)
+#define S3C_DCFG_EPMisCnt_LIMIT (0x1f)
+#define S3C_DCFG_EPMisCnt(_x) ((_x) << 18)
+
+#define S3C_DCFG_PerFrInt_MASK (0x3 << 11)
+#define S3C_DCFG_PerFrInt_SHIFT (11)
+#define S3C_DCFG_PerFrInt_LIMIT (0x3)
+#define S3C_DCFG_PerFrInt(_x) ((_x) << 11)
+
+#define S3C_DCFG_DevAddr_MASK (0x7f << 4)
+#define S3C_DCFG_DevAddr_SHIFT (4)
+#define S3C_DCFG_DevAddr_LIMIT (0x7f)
+#define S3C_DCFG_DevAddr(_x) ((_x) << 4)
+
+#define S3C_DCFG_NZStsOUTHShk (1 << 2)
+
+#define S3C_DCFG_DevSpd_MASK (0x3 << 0)
+#define S3C_DCFG_DevSpd_SHIFT (0)
+#define S3C_DCFG_DevSpd_HS (0x0 << 0)
+#define S3C_DCFG_DevSpd_FS (0x1 << 0)
+#define S3C_DCFG_DevSpd_LS (0x2 << 0)
+#define S3C_DCFG_DevSpd_FS48 (0x3 << 0)
+
+#define S3C_DCTL S3C_HSOTG_REG(0x804)
+
+#define S3C_DCTL_PWROnPrgDone (1 << 11)
+#define S3C_DCTL_CGOUTNak (1 << 10)
+#define S3C_DCTL_SGOUTNak (1 << 9)
+#define S3C_DCTL_CGNPInNAK (1 << 8)
+#define S3C_DCTL_SGNPInNAK (1 << 7)
+#define S3C_DCTL_TstCtl_MASK (0x7 << 4)
+#define S3C_DCTL_TstCtl_SHIFT (4)
+#define S3C_DCTL_GOUTNakSts (1 << 3)
+#define S3C_DCTL_GNPINNakSts (1 << 2)
+#define S3C_DCTL_SftDiscon (1 << 1)
+#define S3C_DCTL_RmtWkUpSig (1 << 0)
+
+#define S3C_DSTS S3C_HSOTG_REG(0x808)
+
+#define S3C_DSTS_SOFFN_MASK (0x3fff << 8)
+#define S3C_DSTS_SOFFN_SHIFT (8)
+#define S3C_DSTS_SOFFN_LIMIT (0x3fff)
+#define S3C_DSTS_SOFFN(_x) ((_x) << 8)
+#define S3C_DSTS_ErraticErr (1 << 3)
+#define S3C_DSTS_EnumSpd_MASK (0x3 << 1)
+#define S3C_DSTS_EnumSpd_SHIFT (1)
+#define S3C_DSTS_EnumSpd_HS (0x0 << 1)
+#define S3C_DSTS_EnumSpd_FS (0x1 << 1)
+#define S3C_DSTS_EnumSpd_LS (0x2 << 1)
+#define S3C_DSTS_EnumSpd_FS48 (0x3 << 1)
+
+#define S3C_DSTS_SuspSts (1 << 0)
+
+#define S3C_DIEPMSK S3C_HSOTG_REG(0x810)
+
+#define S3C_DIEPMSK_INEPNakEffMsk (1 << 6)
+#define S3C_DIEPMSK_INTknEPMisMsk (1 << 5)
+#define S3C_DIEPMSK_INTknTXFEmpMsk (1 << 4)
+#define S3C_DIEPMSK_TimeOUTMsk (1 << 3)
+#define S3C_DIEPMSK_AHBErrMsk (1 << 2)
+#define S3C_DIEPMSK_EPDisbldMsk (1 << 1)
+#define S3C_DIEPMSK_XferComplMsk (1 << 0)
+
+#define S3C_DOEPMSK S3C_HSOTG_REG(0x814)
+
+#define S3C_DOEPMSK_Back2BackSetup (1 << 6)
+#define S3C_DOEPMSK_OUTTknEPdisMsk (1 << 4)
+#define S3C_DOEPMSK_SetupMsk (1 << 3)
+#define S3C_DOEPMSK_AHBErrMsk (1 << 2)
+#define S3C_DOEPMSK_EPDisbldMsk (1 << 1)
+#define S3C_DOEPMSK_XferComplMsk (1 << 0)
+
+#define S3C_DAINT S3C_HSOTG_REG(0x818)
+#define S3C_DAINTMSK S3C_HSOTG_REG(0x81C)
+
+#define S3C_DAINT_OutEP_SHIFT (16)
+#define S3C_DAINT_OutEP(x) (1 << ((x) + 16))
+#define S3C_DAINT_InEP(x) (1 << (x))
+
+#define S3C_DTKNQR1 S3C_HSOTG_REG(0x820)
+#define S3C_DTKNQR2 S3C_HSOTG_REG(0x824)
+#define S3C_DTKNQR3 S3C_HSOTG_REG(0x830)
+#define S3C_DTKNQR4 S3C_HSOTG_REG(0x834)
+
+#define S3C_DVBUSDIS S3C_HSOTG_REG(0x828)
+#define S3C_DVBUSPULSE S3C_HSOTG_REG(0x82C)
+
+#define S3C_DIEPCTL0 S3C_HSOTG_REG(0x900)
+#define S3C_DOEPCTL0 S3C_HSOTG_REG(0xB00)
+#define S3C_DIEPCTL(_a) S3C_HSOTG_REG(0x900 + ((_a) * 0x20))
+#define S3C_DOEPCTL(_a) S3C_HSOTG_REG(0xB00 + ((_a) * 0x20))
+
+/* EP0 specialness:
+ * bits[29..28] - reserved (no SetD0PID, SetD1PID)
+ * bits[25..22] - should always be zero, this isn't a periodic endpoint
+ * bits[10..0] - MPS setting differenct for EP0
+*/
+#define S3C_D0EPCTL_MPS_MASK (0x3 << 0)
+#define S3C_D0EPCTL_MPS_SHIFT (0)
+#define S3C_D0EPCTL_MPS_64 (0x0 << 0)
+#define S3C_D0EPCTL_MPS_32 (0x1 << 0)
+#define S3C_D0EPCTL_MPS_16 (0x2 << 0)
+#define S3C_D0EPCTL_MPS_8 (0x3 << 0)
+
+#define S3C_DxEPCTL_EPEna (1 << 31)
+#define S3C_DxEPCTL_EPDis (1 << 30)
+#define S3C_DxEPCTL_SetD1PID (1 << 29)
+#define S3C_DxEPCTL_SetOddFr (1 << 29)
+#define S3C_DxEPCTL_SetD0PID (1 << 28)
+#define S3C_DxEPCTL_SetEvenFr (1 << 28)
+#define S3C_DxEPCTL_SNAK (1 << 27)
+#define S3C_DxEPCTL_CNAK (1 << 26)
+#define S3C_DxEPCTL_TxFNum_MASK (0xf << 22)
+#define S3C_DxEPCTL_TxFNum_SHIFT (22)
+#define S3C_DxEPCTL_TxFNum_LIMIT (0xf)
+#define S3C_DxEPCTL_TxFNum(_x) ((_x) << 22)
+
+#define S3C_DxEPCTL_Stall (1 << 21)
+#define S3C_DxEPCTL_Snp (1 << 20)
+#define S3C_DxEPCTL_EPType_MASK (0x3 << 18)
+#define S3C_DxEPCTL_EPType_SHIFT (18)
+#define S3C_DxEPCTL_EPType_Control (0x0 << 18)
+#define S3C_DxEPCTL_EPType_Iso (0x1 << 18)
+#define S3C_DxEPCTL_EPType_Bulk (0x2 << 18)
+#define S3C_DxEPCTL_EPType_Intterupt (0x3 << 18)
+
+#define S3C_DxEPCTL_NAKsts (1 << 17)
+#define S3C_DxEPCTL_DPID (1 << 16)
+#define S3C_DxEPCTL_EOFrNum (1 << 16)
+#define S3C_DxEPCTL_USBActEp (1 << 15)
+#define S3C_DxEPCTL_NextEp_MASK (0xf << 11)
+#define S3C_DxEPCTL_NextEp_SHIFT (11)
+#define S3C_DxEPCTL_NextEp_LIMIT (0xf)
+#define S3C_DxEPCTL_NextEp(_x) ((_x) << 11)
+
+#define S3C_DxEPCTL_MPS_MASK (0x7ff << 0)
+#define S3C_DxEPCTL_MPS_SHIFT (0)
+#define S3C_DxEPCTL_MPS_LIMIT (0x7ff)
+#define S3C_DxEPCTL_MPS(_x) ((_x) << 0)
+
+#define S3C_DIEPINT(_a) S3C_HSOTG_REG(0x908 + ((_a) * 0x20))
+#define S3C_DOEPINT(_a) S3C_HSOTG_REG(0xB08 + ((_a) * 0x20))
+
+#define S3C_DxEPINT_INEPNakEff (1 << 6)
+#define S3C_DxEPINT_Back2BackSetup (1 << 6)
+#define S3C_DxEPINT_INTknEPMis (1 << 5)
+#define S3C_DxEPINT_INTknTXFEmp (1 << 4)
+#define S3C_DxEPINT_OUTTknEPdis (1 << 4)
+#define S3C_DxEPINT_Timeout (1 << 3)
+#define S3C_DxEPINT_Setup (1 << 3)
+#define S3C_DxEPINT_AHBErr (1 << 2)
+#define S3C_DxEPINT_EPDisbld (1 << 1)
+#define S3C_DxEPINT_XferCompl (1 << 0)
+
+#define S3C_DIEPTSIZ0 S3C_HSOTG_REG(0x910)
+
+#define S3C_DIEPTSIZ0_PktCnt_MASK (0x3 << 19)
+#define S3C_DIEPTSIZ0_PktCnt_SHIFT (19)
+#define S3C_DIEPTSIZ0_PktCnt_LIMIT (0x3)
+#define S3C_DIEPTSIZ0_PktCnt(_x) ((_x) << 19)
+
+#define S3C_DIEPTSIZ0_XferSize_MASK (0x7f << 0)
+#define S3C_DIEPTSIZ0_XferSize_SHIFT (0)
+#define S3C_DIEPTSIZ0_XferSize_LIMIT (0x7f)
+#define S3C_DIEPTSIZ0_XferSize(_x) ((_x) << 0)
+
+
+#define DOEPTSIZ0 S3C_HSOTG_REG(0xB10)
+#define S3C_DOEPTSIZ0_SUPCnt_MASK (0x3 << 29)
+#define S3C_DOEPTSIZ0_SUPCnt_SHIFT (29)
+#define S3C_DOEPTSIZ0_SUPCnt_LIMIT (0x3)
+#define S3C_DOEPTSIZ0_SUPCnt(_x) ((_x) << 29)
+
+#define S3C_DOEPTSIZ0_PktCnt (1 << 19)
+#define S3C_DOEPTSIZ0_XferSize_MASK (0x7f << 0)
+#define S3C_DOEPTSIZ0_XferSize_SHIFT (0)
+
+#define S3C_DIEPTSIZ(_a) S3C_HSOTG_REG(0x910 + ((_a) * 0x20))
+#define S3C_DOEPTSIZ(_a) S3C_HSOTG_REG(0xB10 + ((_a) * 0x20))
+
+#define S3C_DxEPTSIZ_MC_MASK (0x3 << 29)
+#define S3C_DxEPTSIZ_MC_SHIFT (29)
+#define S3C_DxEPTSIZ_MC_LIMIT (0x3)
+#define S3C_DxEPTSIZ_MC(_x) ((_x) << 29)
+
+#define S3C_DxEPTSIZ_PktCnt_MASK (0x3ff << 19)
+#define S3C_DxEPTSIZ_PktCnt_SHIFT (19)
+#define S3C_DxEPTSIZ_PktCnt_GET(_v) (((_v) >> 19) & 0x3ff)
+#define S3C_DxEPTSIZ_PktCnt_LIMIT (0x3ff)
+#define S3C_DxEPTSIZ_PktCnt(_x) ((_x) << 19)
+
+#define S3C_DxEPTSIZ_XferSize_MASK (0x7ffff << 0)
+#define S3C_DxEPTSIZ_XferSize_SHIFT (0)
+#define S3C_DxEPTSIZ_XferSize_GET(_v) (((_v) >> 0) & 0x7ffff)
+#define S3C_DxEPTSIZ_XferSize_LIMIT (0x7ffff)
+#define S3C_DxEPTSIZ_XferSize(_x) ((_x) << 0)
+
+
+#define S3C_DIEPDMA(_a) S3C_HSOTG_REG(0x914 + ((_a) * 0x20))
+#define S3C_DOEPDMA(_a) S3C_HSOTG_REG(0xB14 + ((_a) * 0x20))
+
+#define S3C_EPFIFO(_a) S3C_HSOTG_REG(0x1000 + ((_a) * 0x1000))
+
+#endif /* __PLAT_S3C64XX_REGS_USB_HSOTG_H */
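
This header only provides register offsets and bit definitions; a driver combines them with ioremap()/readl()/writel() on the controller's mapped base address. A hypothetical usage sketch (the regs pointer and the helper name are invented for illustration):

/* Hypothetical helper: read and acknowledge the core interrupt status. */
static void hsotg_example_handle_gintsts(void __iomem *regs)
{
	u32 gintsts = readl(regs + S3C_GINTSTS);

	if (gintsts & S3C_GINTSTS_USBRst)
		;	/* handle bus reset */
	if (gintsts & S3C_GINTSTS_EnumDone)
		;	/* enumeration done: speed is in DSTS (S3C_DSTS_EnumSpd_MASK) */

	writel(gintsts, regs + S3C_GINTSTS);	/* write-1-to-clear acknowledge */
}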
diff --git a/arch/avr32/kernel/init_task.c b/arch/avr32/kernel/init_task.c
index 993d56ee3cf3..57ec9f2dcd95 100644
--- a/arch/avr32/kernel/init_task.c
+++ b/arch/avr32/kernel/init_task.c
@@ -15,10 +15,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial thread structure. Must be aligned on an 8192-byte boundary.
*/
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index a60cfe757914..8ea0d942cdea 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -6,59 +6,65 @@
mainmenu "Blackfin Kernel Configuration"
config MMU
- bool
- default n
+ def_bool n
config FPU
- bool
- default n
+ def_bool n
config RWSEM_GENERIC_SPINLOCK
- bool
- default y
+ def_bool y
config RWSEM_XCHGADD_ALGORITHM
- bool
- default n
+ def_bool n
config BLACKFIN
- bool
- default y
+ def_bool y
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER
select HAVE_IDE
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_BZIP2
+ select HAVE_KERNEL_LZMA
select HAVE_OPROFILE
select ARCH_WANT_OPTIONAL_GPIOLIB
+config GENERIC_BUG
+ def_bool y
+ depends on BUG
+
config ZONE_DMA
- bool
- default y
+ def_bool y
config GENERIC_FIND_NEXT_BIT
- bool
- default y
+ def_bool y
config GENERIC_HWEIGHT
- bool
- default y
+ def_bool y
config GENERIC_HARDIRQS
- bool
- default y
+ def_bool y
config GENERIC_IRQ_PROBE
- bool
- default y
+ def_bool y
config GENERIC_GPIO
- bool
- default y
+ def_bool y
config FORCE_MAX_ZONEORDER
int
default "14"
config GENERIC_CALIBRATE_DELAY
- bool
- default y
+ def_bool y
+
+config LOCKDEP_SUPPORT
+ def_bool y
+
+config STACKTRACE_SUPPORT
+ def_bool y
+
+config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
source "init/Kconfig"
@@ -408,12 +414,12 @@ comment "Clock/PLL Setup"
config CLKIN_HZ
int "Frequency of the crystal on the board in Hz"
+ default "10000000" if BFIN532_IP0X
default "11059200" if BFIN533_STAMP
+ default "24576000" if PNAV10
+ default "25000000" # most people use this
default "27000000" if BFIN533_EZKIT
- default "25000000" if (BFIN537_STAMP || BFIN527_EZKIT || H8606_HVSISTEMAS || BLACKSTAMP || BFIN526_EZBRD || BFIN538_EZKIT || BFIN518F-EZBRD)
default "30000000" if BFIN561_EZKIT
- default "24576000" if PNAV10
- default "10000000" if BFIN532_IP0X
help
The frequency of CLKIN crystal oscillator on the board in Hz.
Warning: This value should match the crystal on the board. Otherwise,
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index d54c8283825c..6f9533c3d752 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -137,7 +137,7 @@ archclean:
INSTALL_PATH ?= /tftpboot
boot := arch/$(ARCH)/boot
-BOOT_TARGETS = vmImage
+BOOT_TARGETS = vmImage vmImage.bz2 vmImage.gz vmImage.lzma
PHONY += $(BOOT_TARGETS) install
KBUILD_IMAGE := $(boot)/vmImage
@@ -150,7 +150,10 @@ install:
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
define archhelp
- echo '* vmImage - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage)'
+ echo '* vmImage - Alias to selected kernel format (vmImage.gz by default)'
+ echo ' vmImage.bz2 - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.bz2)'
+ echo '* vmImage.gz - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.gz)'
+ echo ' vmImage.lzma - Kernel-only image for U-Boot (arch/$(ARCH)/boot/vmImage.lzma)'
echo ' install - Install kernel using'
echo ' (your) ~/bin/$(CROSS_COMPILE)installkernel or'
echo ' (distribution) PATH: $(CROSS_COMPILE)installkernel or'
diff --git a/arch/blackfin/boot/.gitignore b/arch/blackfin/boot/.gitignore
index 3ae03994b88d..229e50808677 100644
--- a/arch/blackfin/boot/.gitignore
+++ b/arch/blackfin/boot/.gitignore
@@ -1 +1,2 @@
-+vmImage
+vmImage*
+vmlinux*
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index e028d13481a9..3ab6f23561dd 100644
--- a/arch/blackfin/boot/Makefile
+++ b/arch/blackfin/boot/Makefile
@@ -8,24 +8,41 @@
MKIMAGE := $(srctree)/scripts/mkuboot.sh
-targets := vmImage
-extra-y += vmlinux.bin vmlinux.gz
+targets := vmImage vmImage.bz2 vmImage.gz vmImage.lzma
+extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma
quiet_cmd_uimage = UIMAGE $@
cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \
- -C gzip -n 'Linux-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \
+ -C $(2) -n 'Linux-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \
-e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \
-d $< $@
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
-$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
-$(obj)/vmImage: $(obj)/vmlinux.gz
- $(call if_changed,uimage)
- @$(kecho) 'Kernel: $@ is ready'
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,bzip2)
+
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lzma)
+
+$(obj)/vmImage.bz2: $(obj)/vmlinux.bin.bz2
+ $(call if_changed,uimage,bzip2)
+
+$(obj)/vmImage.gz: $(obj)/vmlinux.bin.gz
+ $(call if_changed,uimage,gzip)
+
+$(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma
+ $(call if_changed,uimage,lzma)
+
+suffix-$(CONFIG_KERNEL_GZIP) := gz
+suffix-$(CONFIG_KERNEL_BZIP2) := bz2
+suffix-$(CONFIG_KERNEL_LZMA) := lzma
+$(obj)/vmImage: $(obj)/vmImage.$(suffix-y)
+ @ln -sf $(notdir $<) $@
install:
sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index 7bbf44e4ddf9..b1d92f13ef96 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -90,7 +90,7 @@ static inline int atomic_test_mask(int mask, atomic_t *v)
static inline void atomic_add(int i, atomic_t *v)
{
- long flags;
+ unsigned long flags;
local_irq_save_hw(flags);
v->counter += i;
@@ -99,7 +99,7 @@ static inline void atomic_add(int i, atomic_t *v)
static inline void atomic_sub(int i, atomic_t *v)
{
- long flags;
+ unsigned long flags;
local_irq_save_hw(flags);
v->counter -= i;
@@ -110,7 +110,7 @@ static inline void atomic_sub(int i, atomic_t *v)
static inline int atomic_add_return(int i, atomic_t *v)
{
int __temp = 0;
- long flags;
+ unsigned long flags;
local_irq_save_hw(flags);
v->counter += i;
@@ -124,7 +124,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
static inline int atomic_sub_return(int i, atomic_t *v)
{
int __temp = 0;
- long flags;
+ unsigned long flags;
local_irq_save_hw(flags);
v->counter -= i;
@@ -136,7 +136,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
static inline void atomic_inc(volatile atomic_t *v)
{
- long flags;
+ unsigned long flags;
local_irq_save_hw(flags);
v->counter++;
@@ -145,7 +145,7 @@ static inline void atomic_inc(volatile atomic_t *v)
static inline void atomic_dec(volatile atomic_t *v)
{
- long flags;
+ unsigned long flags;
local_irq_save_hw(flags);
v->counter--;
@@ -154,7 +154,7 @@ static inline void atomic_dec(volatile atomic_t *v)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
- long flags;
+ unsigned long flags;
local_irq_save_hw(flags);
v->counter &= ~mask;
@@ -163,7 +163,7 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
- long flags;
+ unsigned long flags;
local_irq_save_hw(flags);
v->counter |= mask;
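
Every hunk in this file makes the same fix: the flags word saved by local_irq_save_hw() is an unsigned long, and keeping it in a signed long invites type-mismatch warnings and sign-extension surprises. The resulting pattern, shown as an illustrative copy of one accessor (the matching restore call is truncated out of the hunks above):

static inline void atomic_add_sketch(int i, atomic_t *v)
{
	unsigned long flags;	/* irq flags are an unsigned machine word */

	local_irq_save_hw(flags);
	v->counter += i;
	local_irq_restore_hw(flags);
}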
diff --git a/arch/blackfin/include/asm/bfin-global.h b/arch/blackfin/include/asm/bfin-global.h
index daffc0684e75..e39277ea43e8 100644
--- a/arch/blackfin/include/asm/bfin-global.h
+++ b/arch/blackfin/include/asm/bfin-global.h
@@ -31,7 +31,7 @@
#ifndef __ASSEMBLY__
-#include <asm-generic/sections.h>
+#include <asm/sections.h>
#include <asm/ptrace.h>
#include <asm/user.h>
#include <linux/linkage.h>
@@ -99,15 +99,6 @@ extern const char bfin_board_name[];
extern unsigned long bfin_sic_iwr[];
extern unsigned vr_wakeup;
extern u16 _bfin_swrst; /* shadow for Software Reset Register (SWRST) */
-extern unsigned long _ramstart, _ramend, _rambase;
-extern unsigned long memory_start, memory_end, physical_mem_end;
-extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
- _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
- _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[],
- _ebss_l2[], _l2_lma_start[];
-
-/* only used when MTD_UCLINUX */
-extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
#ifdef CONFIG_BFIN_ICACHE_LOCK
extern void cache_grab_lock(int way);
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index 21b036eadab1..75fee2f7d9f2 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -109,7 +109,8 @@ static inline void clear_bit(int nr, volatile unsigned long *addr)
static inline void change_bit(int nr, volatile unsigned long *addr)
{
- int mask, flags;
+ int mask;
+ unsigned long flags;
unsigned long *ADDR = (unsigned long *)addr;
ADDR += nr >> 5;
diff --git a/arch/blackfin/include/asm/bug.h b/arch/blackfin/include/asm/bug.h
index 6d3e11b1fc57..655e49540e41 100644
--- a/arch/blackfin/include/asm/bug.h
+++ b/arch/blackfin/include/asm/bug.h
@@ -2,13 +2,58 @@
#define _BLACKFIN_BUG_H
#ifdef CONFIG_BUG
-#define HAVE_ARCH_BUG
-#define BUG() do { \
- dump_bfin_trace_buffer(); \
- printk(KERN_EMERG "BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
- panic("BUG!"); \
-} while (0)
+#define BFIN_BUG_OPCODE 0xefcd
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#define _BUG_OR_WARN(flags) \
+ asm volatile( \
+ "1: .hword %0\n" \
+ " .section __bug_table,\"a\",@progbits\n" \
+ "2: .long 1b\n" \
+ " .long %1\n" \
+ " .short %2\n" \
+ " .short %3\n" \
+ " .org 2b + %4\n" \
+ " .previous" \
+ : \
+ : "i"(BFIN_BUG_OPCODE), "i"(__FILE__), \
+ "i"(__LINE__), "i"(flags), \
+ "i"(sizeof(struct bug_entry)))
+
+#else
+
+#define _BUG_OR_WARN(flags) \
+ asm volatile( \
+ "1: .hword %0\n" \
+ " .section __bug_table,\"a\",@progbits\n" \
+ "2: .long 1b\n" \
+ " .short %1\n" \
+ " .org 2b + %2\n" \
+ " .previous" \
+ : \
+ : "i"(BFIN_BUG_OPCODE), "i"(flags), \
+ "i"(sizeof(struct bug_entry)))
+
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
+#define BUG() \
+ do { \
+ _BUG_OR_WARN(0); \
+ for (;;); \
+ } while (0)
+
+#define WARN_ON(condition) \
+ ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ _BUG_OR_WARN(BUGFLAG_WARNING); \
+ unlikely(__ret_warn_on); \
+ })
+
+#define HAVE_ARCH_BUG
+#define HAVE_ARCH_WARN_ON
#endif
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index 86637814cf25..2ef669ed9222 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -34,9 +34,13 @@
#define L1_CACHE_SHIFT_MAX 5
#if defined(CONFIG_SMP) && \
- !defined(CONFIG_BFIN_CACHE_COHERENT) && \
- defined(CONFIG_BFIN_DCACHE)
-#define __ARCH_SYNC_CORE_DCACHE
+ !defined(CONFIG_BFIN_CACHE_COHERENT)
+# if defined(CONFIG_BFIN_ICACHE)
+# define __ARCH_SYNC_CORE_ICACHE
+# endif
+# if defined(CONFIG_BFIN_DCACHE)
+# define __ARCH_SYNC_CORE_DCACHE
+# endif
#ifndef __ASSEMBLY__
asmlinkage void __raw_smp_mark_barrier_asm(void);
asmlinkage void __raw_smp_check_barrier_asm(void);
@@ -51,6 +55,7 @@ static inline void smp_check_barrier(void)
}
void resync_core_dcache(void);
+void resync_core_icache(void);
#endif
#endif
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h
index 94697f0f6f40..5c17dee53b5d 100644
--- a/arch/blackfin/include/asm/cacheflush.h
+++ b/arch/blackfin/include/asm/cacheflush.h
@@ -37,6 +37,7 @@ extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned lo
extern void blackfin_dcache_invalidate_range(unsigned long start_address, unsigned long end_address);
extern void blackfin_dflush_page(void *page);
extern void blackfin_invalidate_entire_dcache(void);
+extern void blackfin_invalidate_entire_icache(void);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
@@ -97,7 +98,7 @@ do { memcpy(dst, src, len); \
extern unsigned long reserved_mem_dcache_on;
extern unsigned long reserved_mem_icache_on;
-static inline int bfin_addr_dcachable(unsigned long addr)
+static inline int bfin_addr_dcacheable(unsigned long addr)
{
#ifdef CONFIG_BFIN_DCACHE
if (addr < (_ramend - DMA_UNCACHED_REGION))
diff --git a/arch/blackfin/include/asm/cpu.h b/arch/blackfin/include/asm/cpu.h
index c2594ef877f6..565b8136855e 100644
--- a/arch/blackfin/include/asm/cpu.h
+++ b/arch/blackfin/include/asm/cpu.h
@@ -34,6 +34,7 @@ struct blackfin_cpudata {
unsigned int dmemctl;
unsigned long loops_per_jiffy;
unsigned long dcache_invld_count;
+ unsigned long icache_invld_count;
};
DECLARE_PER_CPU(struct blackfin_cpudata, cpu_data);
diff --git a/arch/blackfin/include/asm/ftrace.h b/arch/blackfin/include/asm/ftrace.h
index 40a8c178f10d..8643680f0f78 100644
--- a/arch/blackfin/include/asm/ftrace.h
+++ b/arch/blackfin/include/asm/ftrace.h
@@ -1 +1,13 @@
-/* empty */
+/*
+ * Blackfin ftrace code
+ *
+ * Copyright 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __ASM_BFIN_FTRACE_H__
+#define __ASM_BFIN_FTRACE_H__
+
+#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call: LINK + CALL */
+
+#endif
diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h
index 51d0bf5e2899..bbe1c3726b69 100644
--- a/arch/blackfin/include/asm/ipipe.h
+++ b/arch/blackfin/include/asm/ipipe.h
@@ -35,10 +35,10 @@
#include <asm/atomic.h>
#include <asm/traps.h>
-#define IPIPE_ARCH_STRING "1.9-01"
+#define IPIPE_ARCH_STRING "1.10-00"
#define IPIPE_MAJOR_NUMBER 1
-#define IPIPE_MINOR_NUMBER 9
-#define IPIPE_PATCH_NUMBER 1
+#define IPIPE_MINOR_NUMBER 10
+#define IPIPE_PATCH_NUMBER 0
#ifdef CONFIG_SMP
#error "I-pipe/blackfin: SMP not implemented"
@@ -54,10 +54,11 @@ do { \
#define task_hijacked(p) \
({ \
- int __x__ = ipipe_current_domain != ipipe_root_domain; \
- /* We would need to clear the SYNC flag for the root domain */ \
- /* over the current processor in SMP mode. */ \
- local_irq_enable_hw(); __x__; \
+ int __x__ = __ipipe_root_domain_p; \
+ __clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status)); \
+ if (__x__) \
+ local_irq_enable_hw(); \
+ !__x__; \
})
struct ipipe_domain;
@@ -179,23 +180,24 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
#define __ipipe_run_isr(ipd, irq) \
do { \
- if (ipd == ipipe_root_domain) { \
+ if (!__ipipe_pipeline_head_p(ipd)) \
local_irq_enable_hw(); \
- if (ipipe_virtual_irq_p(irq)) \
+ if (ipd == ipipe_root_domain) { \
+ if (unlikely(ipipe_virtual_irq_p(irq))) { \
+ irq_enter(); \
ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
- else \
+ irq_exit(); \
+ } else \
ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \
- local_irq_disable_hw(); \
} else { \
__clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
- local_irq_enable_nohead(ipd); \
ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
/* Attempt to exit the outer interrupt level before \
* starting the deferred IRQ processing. */ \
- local_irq_disable_nohead(ipd); \
__ipipe_run_irqtail(); \
__set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
} \
+ local_irq_disable_hw(); \
} while (0)
#define __ipipe_syscall_watched_p(p, sc) \
diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h
index 7645e85a5f6f..400bdd52ce87 100644
--- a/arch/blackfin/include/asm/irq.h
+++ b/arch/blackfin/include/asm/irq.h
@@ -17,270 +17,17 @@
#ifndef _BFIN_IRQ_H_
#define _BFIN_IRQ_H_
-/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h>*/
-#include <mach/irq.h>
-#include <asm/pda.h>
-#include <asm/processor.h>
-
-#ifdef CONFIG_SMP
-/* Forward decl needed due to cdef inter dependencies */
-static inline uint32_t __pure bfin_dspid(void);
-# define blackfin_core_id() (bfin_dspid() & 0xff)
-# define bfin_irq_flags cpu_pda[blackfin_core_id()].imask
-#else
-extern unsigned long bfin_irq_flags;
-#endif
-
-#ifdef CONFIG_IPIPE
-
-#include <linux/ipipe_trace.h>
+#include <linux/irqflags.h>
-void __ipipe_unstall_root(void);
-
-void __ipipe_restore_root(unsigned long flags);
-
-#ifdef CONFIG_DEBUG_HWERR
-# define __all_masked_irq_flags 0x3f
-# define __save_and_cli_hw(x) \
- __asm__ __volatile__( \
- "cli %0;" \
- "sti %1;" \
- : "=&d"(x) \
- : "d" (0x3F) \
- )
-#else
-# define __all_masked_irq_flags 0x1f
-# define __save_and_cli_hw(x) \
- __asm__ __volatile__( \
- "cli %0;" \
- : "=&d"(x) \
- )
-#endif
-
-#define irqs_enabled_from_flags_hw(x) ((x) != __all_masked_irq_flags)
-#define raw_irqs_disabled_flags(flags) (!irqs_enabled_from_flags_hw(flags))
-#define local_test_iflag_hw(x) irqs_enabled_from_flags_hw(x)
-
-#define local_save_flags(x) \
- do { \
- (x) = __ipipe_test_root() ? \
- __all_masked_irq_flags : bfin_irq_flags; \
- barrier(); \
- } while (0)
-
-#define local_irq_save(x) \
- do { \
- (x) = __ipipe_test_and_stall_root() ? \
- __all_masked_irq_flags : bfin_irq_flags; \
- barrier(); \
- } while (0)
-
-static inline void local_irq_restore(unsigned long x)
-{
- barrier();
- __ipipe_restore_root(x == __all_masked_irq_flags);
-}
-
-#define local_irq_disable() \
- do { \
- __ipipe_stall_root(); \
- barrier(); \
- } while (0)
-
-static inline void local_irq_enable(void)
-{
- barrier();
- __ipipe_unstall_root();
-}
-
-#define irqs_disabled() __ipipe_test_root()
-
-#define local_save_flags_hw(x) \
- __asm__ __volatile__( \
- "cli %0;" \
- "sti %0;" \
- : "=d"(x) \
- )
-
-#define irqs_disabled_hw() \
- ({ \
- unsigned long flags; \
- local_save_flags_hw(flags); \
- !irqs_enabled_from_flags_hw(flags); \
- })
-
-static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real)
-{
- /* Merge virtual and real interrupt mask bits into a single
- 32bit word. */
- return (real & ~(1 << 31)) | ((virt != 0) << 31);
-}
-
-static inline int raw_demangle_irq_bits(unsigned long *x)
-{
- int virt = (*x & (1 << 31)) != 0;
- *x &= ~(1L << 31);
- return virt;
-}
-
-#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
-
-#define local_irq_disable_hw() \
- do { \
- int _tmp_dummy; \
- if (!irqs_disabled_hw()) \
- ipipe_trace_begin(0x80000000); \
- __asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : ); \
- } while (0)
-
-#define local_irq_enable_hw() \
- do { \
- if (irqs_disabled_hw()) \
- ipipe_trace_end(0x80000000); \
- __asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags)); \
- } while (0)
-
-#define local_irq_save_hw(x) \
- do { \
- __save_and_cli_hw(x); \
- if (local_test_iflag_hw(x)) \
- ipipe_trace_begin(0x80000001); \
- } while (0)
-
-#define local_irq_restore_hw(x) \
- do { \
- if (local_test_iflag_hw(x)) { \
- ipipe_trace_end(0x80000001); \
- local_irq_enable_hw_notrace(); \
- } \
- } while (0)
-
-#define local_irq_disable_hw_notrace() \
- do { \
- int _tmp_dummy; \
- __asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : ); \
- } while (0)
-
-#define local_irq_enable_hw_notrace() \
- __asm__ __volatile__( \
- "sti %0;" \
- : \
- : "d"(bfin_irq_flags) \
- )
-
-#define local_irq_save_hw_notrace(x) __save_and_cli_hw(x)
-
-#define local_irq_restore_hw_notrace(x) \
- do { \
- if (local_test_iflag_hw(x)) \
- local_irq_enable_hw_notrace(); \
- } while (0)
-
-#else /* CONFIG_IPIPE_TRACE_IRQSOFF */
-
-#define local_irq_enable_hw() \
- __asm__ __volatile__( \
- "sti %0;" \
- : \
- : "d"(bfin_irq_flags) \
- )
-
-#define local_irq_disable_hw() \
- do { \
- int _tmp_dummy; \
- __asm__ __volatile__ ( \
- "cli %0;" \
- : "=d" (_tmp_dummy)); \
- } while (0)
-
-#define local_irq_restore_hw(x) \
- do { \
- if (irqs_enabled_from_flags_hw(x)) \
- local_irq_enable_hw(); \
- } while (0)
-
-#define local_irq_save_hw(x) __save_and_cli_hw(x)
-
-#define local_irq_disable_hw_notrace() local_irq_disable_hw()
-#define local_irq_enable_hw_notrace() local_irq_enable_hw()
-#define local_irq_save_hw_notrace(x) local_irq_save_hw(x)
-#define local_irq_restore_hw_notrace(x) local_irq_restore_hw(x)
-
-#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
-
-#else /* !CONFIG_IPIPE */
-
-/*
- * Interrupt configuring macros.
- */
-#define local_irq_disable() \
- do { \
- int __tmp_dummy; \
- __asm__ __volatile__( \
- "cli %0;" \
- : "=d" (__tmp_dummy) \
- ); \
- } while (0)
-
-#define local_irq_enable() \
- __asm__ __volatile__( \
- "sti %0;" \
- : \
- : "d" (bfin_irq_flags) \
- )
-
-#ifdef CONFIG_DEBUG_HWERR
-# define __save_and_cli(x) \
- __asm__ __volatile__( \
- "cli %0;" \
- "sti %1;" \
- : "=&d" (x) \
- : "d" (0x3F) \
- )
-#else
-# define __save_and_cli(x) \
- __asm__ __volatile__( \
- "cli %0;" \
- : "=&d" (x) \
- )
-#endif
-
-#define local_save_flags(x) \
- __asm__ __volatile__( \
- "cli %0;" \
- "sti %0;" \
- : "=d" (x) \
- )
-
-#ifdef CONFIG_DEBUG_HWERR
-#define irqs_enabled_from_flags(x) (((x) & ~0x3f) != 0)
-#else
-#define irqs_enabled_from_flags(x) ((x) != 0x1f)
-#endif
-
-#define local_irq_restore(x) \
- do { \
- if (irqs_enabled_from_flags(x)) \
- local_irq_enable(); \
- } while (0)
-
-/* For spinlocks etc */
-#define local_irq_save(x) __save_and_cli(x)
-
-#define irqs_disabled() \
-({ \
- unsigned long flags; \
- local_save_flags(flags); \
- !irqs_enabled_from_flags(flags); \
-})
-
-#define local_irq_save_hw(x) local_irq_save(x)
-#define local_irq_restore_hw(x) local_irq_restore(x)
-#define local_irq_enable_hw() local_irq_enable()
-#define local_irq_disable_hw() local_irq_disable()
-#define irqs_disabled_hw() irqs_disabled()
+/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h> */
+#include <mach/irq.h>
-#endif /* !CONFIG_IPIPE */
+/* Xenomai IPIPE helpers */
+#define local_irq_restore_hw(x) local_irq_restore(x)
+#define local_irq_save_hw(x) local_irq_save(x)
+#define local_irq_enable_hw(x) local_irq_enable(x)
+#define local_irq_disable_hw(x) local_irq_disable(x)
+#define irqs_disabled_hw(x) irqs_disabled(x)
#if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
# define NOP_PAD_ANOMALY_05000244 "nop; nop;"
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h
new file mode 100644
index 000000000000..139cba4651b1
--- /dev/null
+++ b/arch/blackfin/include/asm/irqflags.h
@@ -0,0 +1,63 @@
+/*
+ * interface to Blackfin CEC
+ *
+ * Copyright 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __ASM_BFIN_IRQFLAGS_H__
+#define __ASM_BFIN_IRQFLAGS_H__
+
+#ifdef CONFIG_SMP
+# include <asm/pda.h>
+# include <asm/processor.h>
+/* Forward decl needed due to cdef inter dependencies */
+static inline uint32_t __pure bfin_dspid(void);
+# define blackfin_core_id() (bfin_dspid() & 0xff)
+# define bfin_irq_flags cpu_pda[blackfin_core_id()].imask
+#else
+extern unsigned long bfin_irq_flags;
+#endif
+
+static inline void bfin_sti(unsigned long flags)
+{
+ asm volatile("sti %0;" : : "d" (flags));
+}
+
+static inline unsigned long bfin_cli(void)
+{
+ unsigned long flags;
+ asm volatile("cli %0;" : "=d" (flags));
+ return flags;
+}
+
+static inline void raw_local_irq_disable(void)
+{
+ bfin_cli();
+}
+static inline void raw_local_irq_enable(void)
+{
+ bfin_sti(bfin_irq_flags);
+}
+
+#define raw_local_save_flags(flags) do { (flags) = bfin_read_IMASK(); } while (0)
+
+#define raw_irqs_disabled_flags(flags) (((flags) & ~0x3f) == 0)
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+ if (!raw_irqs_disabled_flags(flags))
+ raw_local_irq_enable();
+}
+
+static inline unsigned long __raw_local_irq_save(void)
+{
+ unsigned long flags = bfin_cli();
+#ifdef CONFIG_DEBUG_HWERR
+ bfin_sti(0x3f);
+#endif
+ return flags;
+}
+#define raw_local_irq_save(flags) do { (flags) = __raw_local_irq_save(); } while (0)
+
+#endif
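
[editor note: a minimal sketch, not part of the patch, of how the new raw helpers are reached through the generic <linux/irqflags.h> wrappers in ordinary kernel code:]

	unsigned long flags;

	local_irq_save(flags);		/* -> raw_local_irq_save() -> bfin_cli() */
	/* critical section: core interrupts masked via the CEC */
	local_irq_restore(flags);	/* re-enables only if flags show IRQs were on */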
diff --git a/arch/blackfin/include/asm/kmap_types.h b/arch/blackfin/include/asm/kmap_types.h
index e215f7104974..0a88622339ee 100644
--- a/arch/blackfin/include/asm/kmap_types.h
+++ b/arch/blackfin/include/asm/kmap_types.h
@@ -1,21 +1,6 @@
#ifndef _ASM_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H
-enum km_type {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_PTE0,
- KM_PTE1,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
#endif
diff --git a/arch/blackfin/include/asm/mutex-dec.h b/arch/blackfin/include/asm/mutex-dec.h
deleted file mode 100644
index 0134151656af..000000000000
--- a/arch/blackfin/include/asm/mutex-dec.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * include/asm-generic/mutex-dec.h
- *
- * Generic implementation of the mutex fastpath, based on atomic
- * decrement/increment.
- */
-#ifndef _ASM_GENERIC_MUTEX_DEC_H
-#define _ASM_GENERIC_MUTEX_DEC_H
-
-/**
- * __mutex_fastpath_lock - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function MUST leave the value lower than
- * 1 even when the "1" assertion wasn't true.
- */
-static inline void
-__mutex_fastpath_lock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_dec_return(count) < 0))
- fail_fn(count);
- else
- smp_mb();
-}
-
-/**
- * __mutex_fastpath_lock_retval - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
- */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, fastcall int (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_dec_return(count) < 0))
- return fail_fn(count);
- else {
- smp_mb();
- return 0;
- }
-}
-
-/**
- * __mutex_fastpath_unlock - try to promote the count from 0 to 1
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 0
- *
- * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
- * In the failure case, this function is allowed to either set the value to
- * 1, or to set it to a value lower than 1.
- *
- * If the implementation sets it to a value of lower than 1, then the
- * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
- * to return 0 otherwise.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, fastcall void (*fail_fn)(atomic_t *))
-{
- smp_mb();
- if (unlikely(atomic_inc_return(count) <= 0))
- fail_fn(count);
-}
-
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/**
- * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
- *
- * @count: pointer of type atomic_t
- * @fail_fn: fallback function
- *
- * Change the count from 1 to a value lower than 1, and return 0 (failure)
- * if it wasn't 1 originally, or return 1 (success) otherwise. This function
- * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
- * Additionally, if the value was < 0 originally, this function must not leave
- * it to 0 on failure.
- *
- * If the architecture has no effective trylock variant, it should call the
- * <fail_fn> spinlock-based trylock variant unconditionally.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- /*
- * We have two variants here. The cmpxchg based one is the best one
- * because it never induce a false contention state. It is included
- * here because architectures using the inc/dec algorithms over the
- * xchg ones are much more likely to support cmpxchg natively.
- *
- * If not we fall back to the spinlock based variant - that is
- * just as efficient (and simpler) as a 'destructive' probing of
- * the mutex state would be.
- */
-#ifdef __HAVE_ARCH_CMPXCHG
- if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
- smp_mb();
- return 1;
- }
- return 0;
-#else
- return fail_fn(count);
-#endif
-}
-
-#endif
diff --git a/arch/blackfin/include/asm/sections.h b/arch/blackfin/include/asm/sections.h
index 1443c3353a8c..e7fd0ecd73f7 100644
--- a/arch/blackfin/include/asm/sections.h
+++ b/arch/blackfin/include/asm/sections.h
@@ -4,4 +4,15 @@
/* nothing to see, move along */
#include <asm-generic/sections.h>
+/* only used when MTD_UCLINUX */
+extern unsigned long memory_mtd_start, memory_mtd_end, mtd_size;
+
+extern unsigned long _ramstart, _ramend, _rambase;
+extern unsigned long memory_start, memory_end, physical_mem_end;
+
+extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
+ _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
+ _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[],
+ _ebss_l2[], _l2_lma_start[];
+
#endif
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h
index a4c8254bec55..294dbda24164 100644
--- a/arch/blackfin/include/asm/system.h
+++ b/arch/blackfin/include/asm/system.h
@@ -35,10 +35,10 @@
#define _BLACKFIN_SYSTEM_H
#include <linux/linkage.h>
-#include <linux/compiler.h>
+#include <linux/irqflags.h>
#include <mach/anomaly.h>
+#include <asm/cache.h>
#include <asm/pda.h>
-#include <asm/processor.h>
#include <asm/irq.h>
/*
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index cf5066d3efd2..da35133c171d 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -380,8 +380,9 @@
#define __NR_inotify_init1 365
#define __NR_preadv 366
#define __NR_pwritev 367
+#define __NR_rt_tgsigqueueinfo 368
-#define __NR_syscall 368
+#define __NR_syscall 369
#define NR_syscalls __NR_syscall
/* Old optional stuff no one actually uses */
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index fd4d4328a0f2..3731088e181b 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -15,6 +15,10 @@ else
obj-y += time.o
endif
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+CFLAGS_REMOVE_ftrace.o = -pg
+
obj-$(CONFIG_IPIPE) += ipipe.o
obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o
obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
@@ -23,6 +27,7 @@ obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
# the kgdb test puts code into L2 and without linker
# relaxation, we need to force long calls to/from it
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 763ed84ba459..e0bf8cc06907 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -453,10 +453,10 @@ void *dma_memcpy(void *pdst, const void *psrc, size_t size)
unsigned long src = (unsigned long)psrc;
size_t bulk, rest;
- if (bfin_addr_dcachable(src))
+ if (bfin_addr_dcacheable(src))
blackfin_dcache_flush_range(src, src + size);
- if (bfin_addr_dcachable(dst))
+ if (bfin_addr_dcacheable(dst))
blackfin_dcache_invalidate_range(dst, dst + size);
bulk = size & ~0xffff;
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index 53e893ff708a..aa05e638fb7c 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -103,3 +103,8 @@ EXPORT_SYMBOL(__raw_smp_mark_barrier_asm);
EXPORT_SYMBOL(__raw_smp_check_barrier_asm);
#endif
#endif
+
+#ifdef CONFIG_FUNCTION_TRACER
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 87463ce87f5a..784923e52a9a 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -151,7 +151,7 @@ static noinline int dcplb_miss(unsigned int cpu)
d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_DCACHE
- if (bfin_addr_dcachable(addr)) {
+ if (bfin_addr_dcacheable(addr)) {
d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#ifdef CONFIG_BFIN_WT
d_data |= CPLB_L1_AOW | CPLB_WT;
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
index 8cbb47c7b663..12b030842fdb 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
@@ -28,6 +28,7 @@
#include <asm/cplbinit.h>
#include <asm/cplb.h>
#include <asm/mmu_context.h>
+#include <asm/traps.h>
/*
* WARNING
@@ -100,28 +101,6 @@ static inline void write_icplb_data(int cpu, int idx, unsigned long data,
#endif
}
-/*
- * Given the contents of the status register, return the index of the
- * CPLB that caused the fault.
- */
-static inline int faulting_cplb_index(int status)
-{
- int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
- return 30 - signbits;
-}
-
-/*
- * Given the contents of the status register and the DCPLB_DATA contents,
- * return true if a write access should be permitted.
- */
-static inline int write_permitted(int status, unsigned long data)
-{
- if (status & FAULT_USERSUPV)
- return !!(data & CPLB_SUPV_WR);
- else
- return !!(data & CPLB_USER_WR);
-}
-
/* Counters to implement round-robin replacement. */
static int icplb_rr_index[NR_CPUS] PDT_ATTR;
static int dcplb_rr_index[NR_CPUS] PDT_ATTR;
@@ -245,43 +224,16 @@ MGR_ATTR static int dcplb_miss(int cpu)
return CPLB_RELOADED;
}
-MGR_ATTR static noinline int dcplb_protection_fault(int cpu)
-{
- int status = bfin_read_DCPLB_STATUS();
-
- nr_dcplb_prot[cpu]++;
-
- if (likely(status & FAULT_RW)) {
- int idx = faulting_cplb_index(status);
- unsigned long regaddr = DCPLB_DATA0 + idx * 4;
- unsigned long data = bfin_read32(regaddr);
-
- /* Check if fault is to dirty a clean page */
- if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
- write_permitted(status, data)) {
-
- dcplb_tbl[cpu][idx].data = data;
- bfin_write32(regaddr, data);
- return CPLB_RELOADED;
- }
- }
-
- return CPLB_PROT_VIOL;
-}
-
MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
{
int cause = seqstat & 0x3f;
unsigned int cpu = smp_processor_id();
switch (cause) {
- case 0x2C:
+ case VEC_CPLB_I_M:
return icplb_miss(cpu);
- case 0x26:
+ case VEC_CPLB_M:
return dcplb_miss(cpu);
default:
- if (unlikely(cause == 0x23))
- return dcplb_protection_fault(cpu);
-
return CPLB_UNKNOWN_ERR;
}
}
diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c
index 3302719173ca..2ab56811841c 100644
--- a/arch/blackfin/kernel/early_printk.c
+++ b/arch/blackfin/kernel/early_printk.c
@@ -202,11 +202,15 @@ asmlinkage void __init init_early_exception_vectors(void)
asmlinkage void __init early_trap_c(struct pt_regs *fp, void *retaddr)
{
/* This can happen before the uart is initialized, so initialize
- * the UART now
+ * the UART now (but only if we are running on the processor we think
+ * we are compiled for - otherwise we write to MMRs that don't exist,
+ * and cause other problems. Nothing comes out the UART, but it does
+ * end up in the __buf_log.
*/
- if (likely(early_console == NULL))
+ if (likely(early_console == NULL) && CPUID == bfin_cpuid())
setup_early_printk(DEFAULT_EARLY_PORT);
+ printk(KERN_EMERG "Early panic\n");
dump_bfin_mem(fp);
show_regs(fp);
dump_bfin_trace_buffer();
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S
new file mode 100644
index 000000000000..6980b7a0615d
--- /dev/null
+++ b/arch/blackfin/kernel/ftrace-entry.S
@@ -0,0 +1,140 @@
+/*
+ * mcount and friends -- ftrace stuff
+ *
+ * Copyright (C) 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+
+.text
+
+/* GCC will have called us before setting up the function prologue, so we
+ * can clobber the normal scratch registers, but we need to make sure to
+ * save/restore the registers used for argument passing (R0-R2) in case
+ * the profiled function is using them. With data registers, R3 is the
+ * only one we can blow away. With pointer registers, we have P0-P2.
+ *
+ * Upon entry, the RETS will point to the top of the current profiled
+ * function. And since GCC setup the frame for us, the previous function
+ * will be waiting there. mmmm pie.
+ */
+ENTRY(__mcount)
+ /* save third function arg early so we can do testing below */
+ [--sp] = r2;
+
+ /* load the function pointer to the tracer */
+ p0.l = _ftrace_trace_function;
+ p0.h = _ftrace_trace_function;
+ r3 = [p0];
+
+ /* optional micro optimization: don't call the stub tracer */
+ r2.l = _ftrace_stub;
+ r2.h = _ftrace_stub;
+ cc = r2 == r3;
+ if ! cc jump .Ldo_trace;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* if the ftrace_graph_return function pointer is not set to
+ * the ftrace_stub entry, call prepare_ftrace_return().
+ */
+ p0.l = _ftrace_graph_return;
+ p0.h = _ftrace_graph_return;
+ r3 = [p0];
+ cc = r2 == r3;
+ if ! cc jump _ftrace_graph_caller;
+
+ /* similarly, if the ftrace_graph_entry function pointer is not
+ * set to the ftrace_graph_entry_stub entry, ...
+ */
+ p0.l = _ftrace_graph_entry;
+ p0.h = _ftrace_graph_entry;
+ r2.l = _ftrace_graph_entry_stub;
+ r2.h = _ftrace_graph_entry_stub;
+ r3 = [p0];
+ cc = r2 == r3;
+ if ! cc jump _ftrace_graph_caller;
+#endif
+
+ r2 = [sp++];
+ rts;
+
+.Ldo_trace:
+
+ /* save first/second function arg and the return register */
+ [--sp] = r0;
+ [--sp] = r1;
+ [--sp] = rets;
+
+ /* setup the tracer function */
+ p0 = r3;
+
+ /* tracer(ulong frompc, ulong selfpc):
+ * frompc: the pc that did the call to ...
+ * selfpc: ... this location
+ * the selfpc itself will need adjusting for the mcount call
+ */
+ r1 = rets;
+ r0 = [fp + 4];
+ r1 += -MCOUNT_INSN_SIZE;
+
+ /* call the tracer */
+ call (p0);
+
+ /* restore state and get out of dodge */
+.Lfinish_trace:
+ rets = [sp++];
+ r1 = [sp++];
+ r0 = [sp++];
+ r2 = [sp++];
+
+.globl _ftrace_stub
+_ftrace_stub:
+ rts;
+ENDPROC(__mcount)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/* The prepare_ftrace_return() function is similar to the trace function
+ * except it takes a pointer to the location of the frompc. This is so
+ * the prepare_ftrace_return() can hijack it temporarily for probing
+ * purposes.
+ */
+ENTRY(_ftrace_graph_caller)
+ /* save first/second function arg and the return register */
+ [--sp] = r0;
+ [--sp] = r1;
+ [--sp] = rets;
+
+ r0 = fp;
+ r1 = rets;
+ r0 += 4;
+ r1 += -MCOUNT_INSN_SIZE;
+ call _prepare_ftrace_return;
+
+ jump .Lfinish_trace;
+ENDPROC(_ftrace_graph_caller)
+
+/* Undo the rewrite caused by ftrace_graph_caller(). The common function
+ * ftrace_return_to_handler() will return the original rets so we can
+ * restore it and be on our way.
+ */
+ENTRY(_return_to_handler)
+ /* make sure original return values are saved */
+ [--sp] = p0;
+ [--sp] = r0;
+ [--sp] = r1;
+
+ /* get original return address */
+ call _ftrace_return_to_handler;
+ rets = r0;
+
+ /* anomaly 05000371 - make sure we have at least three instructions
+ * between rets setting and the return
+ */
+ r1 = [sp++];
+ r0 = [sp++];
+ p0 = [sp++];
+ rts;
+ENDPROC(_return_to_handler)
+#endif
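
[editor note: sketch only, not part of the patch. The tracer called through P0 above is whatever has been installed as ftrace_trace_function; with static ftrace that is normally done by registering a callback with the generic prototype. The names below are made up:]

	#include <linux/ftrace.h>

	/* sketch: a no-op callback with the generic ftrace prototype */
	static void my_tracer(unsigned long ip, unsigned long parent_ip)
	{
		/* invoked from __mcount on every traced function entry */
	}

	static struct ftrace_ops my_ops = {
		.func = my_tracer,
	};

	/* in module/driver init: register_ftrace_function(&my_ops); */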
diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c
new file mode 100644
index 000000000000..905bfc40a00b
--- /dev/null
+++ b/arch/blackfin/kernel/ftrace.c
@@ -0,0 +1,42 @@
+/*
+ * ftrace graph code
+ *
+ * Copyright (C) 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/ftrace.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+ struct ftrace_graph_ent trace;
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ if (ftrace_push_return_trace(*parent, self_addr, &trace.depth) == -EBUSY)
+ return;
+
+ trace.func = self_addr;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace)) {
+ current->curr_ret_stack--;
+ return;
+ }
+
+ /* all is well in the world ! hijack RETS ... */
+ *parent = return_hooker;
+}
+
+#endif
diff --git a/arch/blackfin/kernel/init_task.c b/arch/blackfin/kernel/init_task.c
index 2c228c020978..c26c34de9f3c 100644
--- a/arch/blackfin/kernel/init_task.c
+++ b/arch/blackfin/kernel/init_task.c
@@ -35,10 +35,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-
-struct mm_struct init_mm = INIT_MM(init_mm);
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial task structure.
*
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 5fc424803a17..d8cde1fc5cb9 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -99,7 +99,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
* interrupt.
*/
m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
- this_domain = ipipe_current_domain;
+ this_domain = __ipipe_current_domain;
if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
head = &this_domain->p_link;
@@ -212,7 +212,9 @@ void __ipipe_unstall_root_raw(void)
int __ipipe_syscall_root(struct pt_regs *regs)
{
+ struct ipipe_percpu_domain_data *p;
unsigned long flags;
+ int ret;
/*
* We need to run the IRQ tail hook whenever we don't
@@ -231,29 +233,31 @@ int __ipipe_syscall_root(struct pt_regs *regs)
/*
* This routine either returns:
* 0 -- if the syscall is to be passed to Linux;
- * 1 -- if the syscall should not be passed to Linux, and no
+ * >0 -- if the syscall should not be passed to Linux, and no
* tail work should be performed;
- * -1 -- if the syscall should not be passed to Linux but the
+ * <0 -- if the syscall should not be passed to Linux but the
* tail work has to be performed (for handling signals etc).
*/
- if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) &&
- __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) {
- if (ipipe_root_domain_p && !in_atomic()) {
- /*
- * Sync pending VIRQs before _TIF_NEED_RESCHED
- * is tested.
- */
- local_irq_save_hw(flags);
- if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0)
- __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
- local_irq_restore_hw(flags);
- return -1;
- }
+ if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
+ return 0;
+
+ ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
+
+ local_irq_save_hw(flags);
+
+ if (!__ipipe_root_domain_p) {
+ local_irq_restore_hw(flags);
return 1;
}
- return 0;
+ p = ipipe_root_cpudom_ptr();
+ if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0)
+ __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+
+ local_irq_restore_hw(flags);
+
+ return -ret;
}
unsigned long ipipe_critical_enter(void (*syncfn) (void))
@@ -329,9 +333,7 @@ asmlinkage void __ipipe_sync_root(void)
void ___ipipe_sync_pipeline(unsigned long syncmask)
{
- struct ipipe_domain *ipd = ipipe_current_domain;
-
- if (ipd == ipipe_root_domain) {
+ if (__ipipe_root_domain_p) {
if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
return;
}
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 80447f99c2b5..6454babdfaff 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -1098,7 +1098,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
CPUID, bfin_cpuid());
seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
- "stepping\t: %d\n",
+ "stepping\t: %d ",
cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
"mpu on",
@@ -1107,7 +1107,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#endif
revid);
- seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
+ if (bfin_revid() != bfin_compiled_revid()) {
+ if (bfin_compiled_revid() == -1)
+ seq_printf(m, "(Compiled for Rev none)");
+ else if (bfin_compiled_revid() == 0xffff)
+ seq_printf(m, "(Compiled for Rev any)");
+ else
+ seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
+ }
+
+ seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
cclk/1000000, cclk%1000000,
sclk/1000000, sclk%1000000);
seq_printf(m, "bogomips\t: %lu.%02lu\n"
@@ -1172,6 +1181,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef __ARCH_SYNC_CORE_DCACHE
seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count);
#endif
+#ifdef __ARCH_SYNC_CORE_ICACHE
+ seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count);
+#endif
#ifdef CONFIG_BFIN_ICACHE_LOCK
switch ((cpudata->imemctl >> 3) & WAYALL_L) {
case WAY0_L:
diff --git a/arch/blackfin/kernel/stacktrace.c b/arch/blackfin/kernel/stacktrace.c
new file mode 100644
index 000000000000..30301e1eace5
--- /dev/null
+++ b/arch/blackfin/kernel/stacktrace.c
@@ -0,0 +1,53 @@
+/*
+ * Blackfin stacktrace code (mostly copied from avr32)
+ *
+ * Copyright 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/thread_info.h>
+#include <linux/module.h>
+
+register unsigned long current_frame_pointer asm("FP");
+
+struct stackframe {
+ unsigned long fp;
+ unsigned long rets;
+};
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace(struct stack_trace *trace)
+{
+ unsigned long low, high;
+ unsigned long fp;
+ struct stackframe *frame;
+ int skip = trace->skip;
+
+ low = (unsigned long)task_stack_page(current);
+ high = low + THREAD_SIZE;
+ fp = current_frame_pointer;
+
+ while (fp >= low && fp <= (high - sizeof(*frame))) {
+ frame = (struct stackframe *)fp;
+
+ if (skip) {
+ skip--;
+ } else {
+ trace->entries[trace->nr_entries++] = frame->rets;
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
+
+ /*
+ * The next frame must be at a higher address than the
+ * current frame.
+ */
+ low = fp + sizeof(*frame);
+ fp = frame->fp;
+ }
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
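
[editor note: a small, hypothetical caller for the new helper, sketch only; it mirrors how generic users such as lockdep drive the stacktrace API:]

	#include <linux/kernel.h>
	#include <linux/stacktrace.h>

	static unsigned long entries[16];

	static void dump_current_stack(void)
	{
		struct stack_trace trace = {
			.entries     = entries,
			.max_entries = ARRAY_SIZE(entries),
			.skip        = 1,	/* drop this function's own frame */
		};

		save_stack_trace(&trace);
		print_stack_trace(&trace, 0);
	}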
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index aa76dfb0226e..d279552fe9b0 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -27,6 +27,7 @@
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <linux/bug.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -238,6 +239,11 @@ asmlinkage void double_fault_c(struct pt_regs *fp)
}
+static int kernel_mode_regs(struct pt_regs *regs)
+{
+ return regs->ipend & 0xffc0;
+}
+
asmlinkage void trap_c(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
@@ -246,6 +252,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
unsigned int cpu = smp_processor_id();
#endif
+ const char *strerror = NULL;
int sig = 0;
siginfo_t info;
unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE;
@@ -259,27 +266,10 @@ asmlinkage void trap_c(struct pt_regs *fp)
* double faults if the stack has become corrupt
*/
- /* If the fault was caused by a kernel thread, or interrupt handler
- * we will kernel panic, so the system reboots.
- * If KGDB is enabled, don't set this for kernel breakpoints
- */
-
- /* TODO: check to see if we are in some sort of deferred HWERR
- * that we should be able to recover from, not kernel panic
- */
- if ((bfin_read_IPEND() & 0xFFC0) && (trapnr != VEC_STEP)
-#ifdef CONFIG_KGDB
- && (trapnr != VEC_EXCPT02)
+#ifndef CONFIG_KGDB
+ /* IPEND is skipped if KGDB isn't enabled (see entry code) */
+ fp->ipend = bfin_read_IPEND();
#endif
- ){
- console_verbose();
- oops_in_progress = 1;
- } else if (current) {
- if (current->mm == NULL) {
- console_verbose();
- oops_in_progress = 1;
- }
- }
/* trap_c() will be called for exceptions. During exceptions
* processing, the pc value should be set with retx value.
@@ -307,15 +297,15 @@ asmlinkage void trap_c(struct pt_regs *fp)
sig = SIGTRAP;
CHK_DEBUGGER_TRAP_MAYBE();
/* Check if this is a breakpoint in kernel space */
- if (fp->ipend & 0xffc0)
- return;
+ if (kernel_mode_regs(fp))
+ goto traps_done;
else
break;
/* 0x03 - User Defined, userspace stack overflow */
case VEC_EXCPT03:
info.si_code = SEGV_STACKFLOW;
sig = SIGSEGV;
- verbose_printk(KERN_NOTICE EXC_0x03(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x03(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x02 - KGDB initial connection and break signal trap */
@@ -324,7 +314,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
info.si_code = TRAP_ILLTRAP;
sig = SIGTRAP;
CHK_DEBUGGER_TRAP();
- return;
+ goto traps_done;
#endif
/* 0x04 - User Defined */
/* 0x05 - User Defined */
@@ -344,7 +334,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
case VEC_EXCPT04 ... VEC_EXCPT15:
info.si_code = ILL_ILLPARAOP;
sig = SIGILL;
- verbose_printk(KERN_NOTICE EXC_0x04(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x04(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x10 HW Single step, handled here */
@@ -353,15 +343,15 @@ asmlinkage void trap_c(struct pt_regs *fp)
sig = SIGTRAP;
CHK_DEBUGGER_TRAP_MAYBE();
/* Check if this is a single step in kernel space */
- if (fp->ipend & 0xffc0)
- return;
+ if (kernel_mode_regs(fp))
+ goto traps_done;
else
break;
/* 0x11 - Trace Buffer Full, handled here */
case VEC_OVFLOW:
info.si_code = TRAP_TRACEFLOW;
sig = SIGTRAP;
- verbose_printk(KERN_NOTICE EXC_0x11(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x11(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x12 - Reserved, Caught by default */
@@ -381,37 +371,54 @@ asmlinkage void trap_c(struct pt_regs *fp)
/* 0x20 - Reserved, Caught by default */
/* 0x21 - Undefined Instruction, handled here */
case VEC_UNDEF_I:
+#ifdef CONFIG_BUG
+ if (kernel_mode_regs(fp)) {
+ switch (report_bug(fp->pc, fp)) {
+ case BUG_TRAP_TYPE_NONE:
+ break;
+ case BUG_TRAP_TYPE_WARN:
+ dump_bfin_trace_buffer();
+ fp->pc += 2;
+ goto traps_done;
+ case BUG_TRAP_TYPE_BUG:
+ /* call to panic() will dump trace, and it is
+ * off at this point, so it won't be clobbered
+ */
+ panic("BUG()");
+ }
+ }
+#endif
info.si_code = ILL_ILLOPC;
sig = SIGILL;
- verbose_printk(KERN_NOTICE EXC_0x21(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x22 - Illegal Instruction Combination, handled here */
case VEC_ILGAL_I:
info.si_code = ILL_ILLPARAOP;
sig = SIGILL;
- verbose_printk(KERN_NOTICE EXC_0x22(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x22(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x23 - Data CPLB protection violation, handled here */
case VEC_CPLB_VL:
info.si_code = ILL_CPLB_VI;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE EXC_0x23(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x24 - Data access misaligned, handled here */
case VEC_MISALI_D:
info.si_code = BUS_ADRALN;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE EXC_0x24(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x24(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x25 - Unrecoverable Event, handled here */
case VEC_UNCOV:
info.si_code = ILL_ILLEXCPT;
sig = SIGILL;
- verbose_printk(KERN_NOTICE EXC_0x25(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x25(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr,
@@ -419,7 +426,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
case VEC_CPLB_M:
info.si_code = BUS_ADRALN;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE EXC_0x26(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x26(KERN_NOTICE);
break;
/* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */
case VEC_CPLB_MHIT:
@@ -427,10 +434,10 @@ asmlinkage void trap_c(struct pt_regs *fp)
sig = SIGSEGV;
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START)
- verbose_printk(KERN_NOTICE "NULL pointer access\n");
+ strerror = KERN_NOTICE "NULL pointer access\n";
else
#endif
- verbose_printk(KERN_NOTICE EXC_0x27(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x27(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x28 - Emulation Watchpoint, handled here */
@@ -440,8 +447,8 @@ asmlinkage void trap_c(struct pt_regs *fp)
pr_debug(EXC_0x28(KERN_DEBUG));
CHK_DEBUGGER_TRAP_MAYBE();
/* Check if this is a watchpoint in kernel space */
- if (fp->ipend & 0xffc0)
- return;
+ if (kernel_mode_regs(fp))
+ goto traps_done;
else
break;
#ifdef CONFIG_BF535
@@ -449,7 +456,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
case VEC_ISTRU_VL: /* ADSP-BF535 only (MH) */
info.si_code = BUS_OPFETCH;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE "BF535: VEC_ISTRU_VL\n");
+ strerror = KERN_NOTICE "BF535: VEC_ISTRU_VL\n";
CHK_DEBUGGER_TRAP_MAYBE();
break;
#else
@@ -459,21 +466,21 @@ asmlinkage void trap_c(struct pt_regs *fp)
case VEC_MISALI_I:
info.si_code = BUS_ADRALN;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE EXC_0x2A(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x2A(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x2B - Instruction CPLB protection violation, handled here */
case VEC_CPLB_I_VL:
info.si_code = ILL_CPLB_VI;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE EXC_0x2B(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x2B(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */
case VEC_CPLB_I_M:
info.si_code = ILL_CPLB_MISS;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE EXC_0x2C(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x2C(KERN_NOTICE);
break;
/* 0x2D - Instruction CPLB Multiple Hits, handled here */
case VEC_CPLB_I_MHIT:
@@ -481,17 +488,17 @@ asmlinkage void trap_c(struct pt_regs *fp)
sig = SIGSEGV;
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START)
- verbose_printk(KERN_NOTICE "Jump to NULL address\n");
+ strerror = KERN_NOTICE "Jump to NULL address\n";
else
#endif
- verbose_printk(KERN_NOTICE EXC_0x2D(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x2D(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x2E - Illegal use of Supervisor Resource, handled here */
case VEC_ILL_RES:
info.si_code = ILL_PRVOPC;
sig = SIGILL;
- verbose_printk(KERN_NOTICE EXC_0x2E(KERN_NOTICE));
+ strerror = KERN_NOTICE EXC_0x2E(KERN_NOTICE);
CHK_DEBUGGER_TRAP_MAYBE();
break;
/* 0x2F - Reserved, Caught by default */
@@ -519,17 +526,17 @@ asmlinkage void trap_c(struct pt_regs *fp)
case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR):
info.si_code = BUS_ADRALN;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE HWC_x2(KERN_NOTICE));
+ strerror = KERN_NOTICE HWC_x2(KERN_NOTICE);
break;
/* External Memory Addressing Error */
case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR):
info.si_code = BUS_ADRERR;
sig = SIGBUS;
- verbose_printk(KERN_NOTICE HWC_x3(KERN_NOTICE));
+ strerror = KERN_NOTICE HWC_x3(KERN_NOTICE);
break;
/* Performance Monitor Overflow */
case (SEQSTAT_HWERRCAUSE_PERF_FLOW):
- verbose_printk(KERN_NOTICE HWC_x12(KERN_NOTICE));
+ strerror = KERN_NOTICE HWC_x12(KERN_NOTICE);
break;
/* RAISE 5 instruction */
case (SEQSTAT_HWERRCAUSE_RAISE_5):
@@ -546,7 +553,6 @@ asmlinkage void trap_c(struct pt_regs *fp)
* if we get here we hit a reserved one, so panic
*/
default:
- oops_in_progress = 1;
info.si_code = ILL_ILLPARAOP;
sig = SIGILL;
verbose_printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n",
@@ -557,6 +563,16 @@ asmlinkage void trap_c(struct pt_regs *fp)
BUG_ON(sig == 0);
+ /* If the fault was caused by a kernel thread or an interrupt handler,
+ * the kernel will panic, so the system reboots.
+ */
+ if (kernel_mode_regs(fp) || (current && !current->mm)) {
+ console_verbose();
+ oops_in_progress = 1;
+ if (strerror)
+ verbose_printk(strerror);
+ }
+
if (sig != SIGTRAP) {
dump_bfin_process(fp);
dump_bfin_mem(fp);
@@ -606,8 +622,8 @@ asmlinkage void trap_c(struct pt_regs *fp)
if (ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8))
fp->pc = SAFE_USER_INSTRUCTION;
+ traps_done:
trace_buffer_restore(j);
- return;
}
/* Typical exception handling routines */
@@ -792,6 +808,18 @@ void dump_bfin_trace_buffer(void)
}
EXPORT_SYMBOL(dump_bfin_trace_buffer);
+#ifdef CONFIG_BUG
+int is_valid_bugaddr(unsigned long addr)
+{
+ unsigned short opcode;
+
+ if (!get_instruction(&opcode, (unsigned short *)addr))
+ return 0;
+
+ return opcode == BFIN_BUG_OPCODE;
+}
+#endif
+
/*
* Checks to see if the address pointed to is either a
* 16-bit CALL instruction, or a 32-bit CALL instruction
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 8b67167cb4f4..6ac307ca0d80 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -54,6 +54,7 @@ SECTIONS
SCHED_TEXT
#endif
LOCK_TEXT
+ IRQENTRY_TEXT
KPROBES_TEXT
*(.text.*)
*(.fixup)
@@ -166,6 +167,20 @@ SECTIONS
}
PERCPU(4)
SECURITY_INIT
+
+ /* we have to discard exit text and such at runtime, not link time, to
+ * handle embedded cross-section references (alt instructions, bug
+ * table, eh_frame, etc...)
+ */
+ .exit.text :
+ {
+ EXIT_TEXT
+ }
+ .exit.data :
+ {
+ EXIT_DATA
+ }
+
.init.ramfs :
{
. = ALIGN(4);
@@ -264,8 +279,6 @@ SECTIONS
/DISCARD/ :
{
- EXIT_TEXT
- EXIT_DATA
*(.exitcall.exit)
}
}
diff --git a/arch/blackfin/lib/checksum.c b/arch/blackfin/lib/checksum.c
index 762a7f02970a..cd605e7d8518 100644
--- a/arch/blackfin/lib/checksum.c
+++ b/arch/blackfin/lib/checksum.c
@@ -116,6 +116,7 @@ __sum16 ip_compute_csum(const void *buff, int len)
{
return (__force __sum16)~do_csum(buff, len);
}
+EXPORT_SYMBOL(ip_compute_csum);
/*
* copy from fs while checksumming, otherwise like csum_partial
@@ -130,6 +131,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
memcpy(dst, (__force void *)src, len);
return csum_partial(dst, len, sum);
}
+EXPORT_SYMBOL(csum_partial_copy_from_user);
/*
* copy from ds while checksumming, otherwise like csum_partial
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index 62bba09bcce6..1382f0382359 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -246,7 +246,7 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
.modalias = "m25p80", /* Name of spi_driver for this device */
.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
- .chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
+ .chip_select = 2, /* On BF518F-EZBRD it's SPI0_SSEL2 */
.platform_data = &bfin_spi_flash_data,
.controller_data = &spi_flash_chip_info,
.mode = SPI_MODE_3,
@@ -369,6 +369,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI0,
.end = CH_SPI0,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI0,
+ .end = IRQ_SPI0,
.flags = IORESOURCE_IRQ,
},
};
@@ -399,6 +404,11 @@ static struct resource bfin_spi1_resource[] = {
[1] = {
.start = CH_SPI1,
.end = CH_SPI1,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI1,
+ .end = IRQ_SPI1,
.flags = IORESOURCE_IRQ,
},
};
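
[editor note: sketch only, not part of the patch. With the DMA channel and the IRQ now split into separate resources, the SPI controller driver can fetch them through the standard platform API; "pdev" is the platform_device passed to probe():]

	struct resource *dma = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	int irq = platform_get_irq(pdev, 0);	/* IRQ_SPI0 on this board */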
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 6d6f9effa0bb..1eaf27ff722e 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -664,6 +664,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 1435c5d38cd5..9f9c0005dcf1 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -467,6 +467,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 147edd1eb1ad..3e5b7db6b065 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -723,6 +723,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index 895f213ea454..38cf8ffd6d74 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -266,6 +266,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 0765872a8ada..9ecdc361fa6d 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -162,6 +162,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index a727e538fa28..1443e92d8b62 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -160,6 +160,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index 842f1c9c2393..89a5ec4ca048 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -196,6 +196,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index e19c565ade16..a68ade8a3ca2 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -299,6 +299,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537.c b/arch/blackfin/mach-bf537/boards/cm_bf537.c
index 4fee19673127..2a87d1cfcd06 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537.c
@@ -182,8 +182,13 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
- }
+ },
};
/* SPI controller data */
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index 3c159819e555..399f81da7b93 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -184,6 +184,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index 26707ce39f29..838240f151f5 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -398,8 +398,13 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
- }
+ },
};
/* SPI controller data */
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index dfb5036f8a6b..ff7228caa7da 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -1345,7 +1345,7 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
#if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE)
{
I2C_BOARD_INFO("pmic-adp5520", 0x32),
- .irq = IRQ_PF7,
+ .irq = IRQ_PG0,
.platform_data = (void *)&adp5520_pdev_data,
},
#endif
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 280574591201..e523e6e610d0 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -182,6 +182,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index e37cb9378884..57695b4c3c09 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -352,6 +352,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI0,
.end = CH_SPI0,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI0,
+ .end = IRQ_SPI0,
.flags = IORESOURCE_IRQ,
}
};
@@ -366,6 +371,11 @@ static struct resource bfin_spi1_resource[] = {
[1] = {
.start = CH_SPI1,
.end = CH_SPI1,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI1,
+ .end = IRQ_SPI1,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index f53ad682530b..f5a3c30a41bd 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -612,6 +612,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI0,
.end = CH_SPI0,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI0,
+ .end = IRQ_SPI0,
.flags = IORESOURCE_IRQ,
}
};
@@ -626,6 +631,11 @@ static struct resource bfin_spi1_resource[] = {
[1] = {
.start = CH_SPI1,
.end = CH_SPI1,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI1,
+ .end = IRQ_SPI1,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index add5a17452ce..805a57b5e650 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -396,6 +396,8 @@ static struct platform_device bfin_sir3_device = {
#endif
#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
+#include <linux/smsc911x.h>
+
static struct resource smsc911x_resources[] = {
{
.name = "smsc911x-memory",
@@ -409,11 +411,22 @@ static struct resource smsc911x_resources[] = {
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
},
};
+
+static struct smsc911x_platform_config smsc911x_config = {
+ .flags = SMSC911X_USE_32BIT,
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+ .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
static struct platform_device smsc911x_device = {
.name = "smsc911x",
.id = 0,
.num_resources = ARRAY_SIZE(smsc911x_resources),
.resource = smsc911x_resources,
+ .dev = {
+ .platform_data = &smsc911x_config,
+ },
};
#endif
@@ -741,6 +754,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI0,
.end = CH_SPI0,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI0,
+ .end = IRQ_SPI0,
.flags = IORESOURCE_IRQ,
}
};
@@ -755,6 +773,11 @@ static struct resource bfin_spi1_resource[] = {
[1] = {
.start = CH_SPI1,
.end = CH_SPI1,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI1,
+ .end = IRQ_SPI1,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 0dd9685e5d53..0c9d72c5f5ba 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -177,8 +177,13 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
- }
+ },
};
/* SPI controller data */
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 0e2178a1aec5..b5ef7ff7b7bd 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -304,6 +304,11 @@ static struct resource bfin_spi0_resource[] = {
[1] = {
.start = CH_SPI,
.end = CH_SPI,
+ .flags = IORESOURCE_DMA,
+ },
+ [2] = {
+ .start = IRQ_SPI,
+ .end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
}
};
diff --git a/arch/blackfin/mach-common/cache-c.c b/arch/blackfin/mach-common/cache-c.c
index e6ab1f815123..b59ce3cb3807 100644
--- a/arch/blackfin/mach-common/cache-c.c
+++ b/arch/blackfin/mach-common/cache-c.c
@@ -16,9 +16,21 @@
void blackfin_invalidate_entire_dcache(void)
{
u32 dmem = bfin_read_DMEM_CONTROL();
- SSYNC();
bfin_write_DMEM_CONTROL(dmem & ~0xc);
SSYNC();
bfin_write_DMEM_CONTROL(dmem);
SSYNC();
}
+
+/* Invalidate the entire instruction cache by
+ * clearing the IMC bit
+ */
+void blackfin_invalidate_entire_icache(void)
+{
+ u32 imem = bfin_read_IMEM_CONTROL();
+ bfin_write_IMEM_CONTROL(imem & ~0x4);
+ SSYNC();
+ bfin_write_IMEM_CONTROL(imem);
+ SSYNC();
+}
+
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index da0558ad1b1a..31fa313e81cf 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -42,6 +42,7 @@
#include <asm/thread_info.h> /* TIF_NEED_RESCHED */
#include <asm/asm-offsets.h>
#include <asm/trace.h>
+#include <asm/traps.h>
#include <asm/context.S>
@@ -84,13 +85,15 @@ ENTRY(_ex_workaround_261)
if !cc jump _bfin_return_from_exception;
/* fall through */
R7 = P4;
- R6 = 0x26; /* Data CPLB Miss */
+ R6 = VEC_CPLB_M; /* Data CPLB Miss */
cc = R6 == R7;
if cc jump _ex_dcplb_miss (BP);
- R6 = 0x23; /* Data CPLB Miss */
+#ifdef CONFIG_MPU
+ R6 = VEC_CPLB_VL; /* Data CPLB Violation */
cc = R6 == R7;
if cc jump _ex_dcplb_viol (BP);
- /* Handle 0x23 Data CPLB Protection Violation
+#endif
+ /* Handle Data CPLB Protection Violation
* and Data CPLB Multiple Hits - Linux Trap Zero
*/
jump _ex_trap_c;
@@ -270,7 +273,7 @@ ENTRY(_bfin_return_from_exception)
r6.l = lo(SEQSTAT_EXCAUSE);
r6.h = hi(SEQSTAT_EXCAUSE);
r7 = r7 & r6;
- r6 = 0x25;
+ r6 = VEC_UNCOV;
CC = R7 == R6;
if CC JUMP _double_fault;
#endif
@@ -1605,6 +1608,7 @@ ENTRY(_sys_call_table)
.long _sys_inotify_init1 /* 365 */
.long _sys_preadv
.long _sys_pwritev
+ .long _sys_rt_tgsigqueueinfo
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 3b8ebaee77f2..61840059dfac 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -144,7 +144,7 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
static irqreturn_t ipi_handler(int irq, void *dev_instance)
{
- struct ipi_message *msg, *mg;
+ struct ipi_message *msg;
struct ipi_message_queue *msg_queue;
unsigned int cpu = smp_processor_id();
@@ -154,7 +154,8 @@ static irqreturn_t ipi_handler(int irq, void *dev_instance)
msg_queue->count++;
spin_lock(&msg_queue->lock);
- list_for_each_entry_safe(msg, mg, &msg_queue->head, list) {
+ while (!list_empty(&msg_queue->head)) {
+ msg = list_entry(msg_queue->head.next, typeof(*msg), list);
list_del(&msg->list);
switch (msg->type) {
case BFIN_IPI_RESCHEDULE:
@@ -221,7 +222,7 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
}
@@ -261,7 +262,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
@@ -292,7 +293,7 @@ void smp_send_reschedule(int cpu)
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
@@ -320,7 +321,7 @@ void smp_send_stop(void)
for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
}
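Two related fixes here: producers now append with list_add_tail() so the per-CPU IPI queue drains in send order, and the handler re-reads the list head on every pass instead of walking a snapshot with list_for_each_entry_safe(). A stripped-down sketch of the resulting producer/consumer pairing, reusing the structure names from the hunks above:

#include <linux/list.h>
#include <linux/spinlock.h>

/* producer: queue at the tail so the oldest message is handled first */
static void queue_ipi(struct ipi_message *msg, struct ipi_message_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        list_add_tail(&msg->list, &q->head);    /* FIFO, not LIFO */
        spin_unlock_irqrestore(&q->lock, flags);
}

/* consumer: always take the entry at the front until the queue is empty */
static void drain_ipi(struct ipi_message_queue *q)
{
        struct ipi_message *msg;

        spin_lock(&q->lock);
        while (!list_empty(&q->head)) {
                msg = list_entry(q->head.next, typeof(*msg), list);
                list_del(&msg->list);
                /* dispatch on msg->type here */
        }
        spin_unlock(&q->lock);
}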
@@ -468,6 +469,17 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
+#ifdef __ARCH_SYNC_CORE_ICACHE
+void resync_core_icache(void)
+{
+ unsigned int cpu = get_cpu();
+ blackfin_invalidate_entire_icache();
+ ++per_cpu(cpu_data, cpu).icache_invld_count;
+ put_cpu();
+}
+EXPORT_SYMBOL(resync_core_icache);
+#endif
+
#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));
diff --git a/arch/cris/include/asm/kmap_types.h b/arch/cris/include/asm/kmap_types.h
index 492988cb9077..d2d643c4ea59 100644
--- a/arch/cris/include/asm/kmap_types.h
+++ b/arch/cris/include/asm/kmap_types.h
@@ -5,21 +5,6 @@
* is actually used on cris.
*/
-enum km_type {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_PTE0,
- KM_PTE1,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
#endif
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 4df0b320d524..51dcd04d2777 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -38,10 +38,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial thread structure.
*
diff --git a/arch/frv/kernel/init_task.c b/arch/frv/kernel/init_task.c
index 29429a8b7f6a..1d3df1d9495c 100644
--- a/arch/frv/kernel/init_task.c
+++ b/arch/frv/kernel/init_task.c
@@ -12,10 +12,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial thread structure.
*
diff --git a/arch/h8300/include/asm/kmap_types.h b/arch/h8300/include/asm/kmap_types.h
index 1ec8a3427120..be12a7160116 100644
--- a/arch/h8300/include/asm/kmap_types.h
+++ b/arch/h8300/include/asm/kmap_types.h
@@ -1,21 +1,6 @@
#ifndef _ASM_H8300_KMAP_TYPES_H
#define _ASM_H8300_KMAP_TYPES_H
-enum km_type {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_PTE0,
- KM_PTE1,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
#endif
diff --git a/arch/h8300/kernel/init_task.c b/arch/h8300/kernel/init_task.c
index cb5dc552da97..089c65ed6eb3 100644
--- a/arch/h8300/kernel/init_task.c
+++ b/arch/h8300/kernel/init_task.c
@@ -14,10 +14,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial task structure.
*
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 21c04114ddd2..8cfb001092ab 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1131,7 +1131,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
#ifdef CONFIG_NUMA
{
struct page *page;
- page = alloc_pages_node(ioc->node == MAX_NUMNODES ?
+ page = alloc_pages_exact_node(ioc->node == MAX_NUMNODES ?
numa_node_id() : ioc->node, flags,
get_order(size));
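This is one of several conversions in this series from alloc_pages_node() to alloc_pages_exact_node() (see also ia64/uncached.c, ia64/sn/pci_dma.c and powerpc/cell/ras.c below). The distinction, paraphrased and simplified from the include/linux/gfp.h helpers of this era, is that the plain variant quietly falls back to the local node when nid is negative, while the exact variant trusts the caller to pass an already-resolved node id:

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/topology.h>

/* simplified paraphrase, not the literal gfp.h definitions */
static inline struct page *alloc_pages_node_sketch(int nid, gfp_t gfp,
                                                   unsigned int order)
{
        if (nid < 0)                    /* "unknown node" means current node */
                nid = numa_node_id();
        return __alloc_pages(gfp, order, node_zonelist(nid, gfp));
}

static inline struct page *alloc_pages_exact_node_sketch(int nid, gfp_t gfp,
                                                          unsigned int order)
{
        VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);      /* caller resolved nid */
        return __alloc_pages(gfp, order, node_zonelist(nid, gfp));
}

The sba_iommu caller above already folds MAX_NUMNODES to numa_node_id(), so only the redundant fallback check is dropped.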
diff --git a/arch/ia64/include/asm/kmap_types.h b/arch/ia64/include/asm/kmap_types.h
index 5d1658aa2b3b..05d5f9996105 100644
--- a/arch/ia64/include/asm/kmap_types.h
+++ b/arch/ia64/include/asm/kmap_types.h
@@ -1,30 +1,12 @@
#ifndef _ASM_IA64_KMAP_TYPES_H
#define _ASM_IA64_KMAP_TYPES_H
-
#ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define __WITH_KM_FENCE
#endif
-enum km_type {
-D(0) KM_BOUNCE_READ,
-D(1) KM_SKB_SUNRPC_DATA,
-D(2) KM_SKB_DATA_SOFTIRQ,
-D(3) KM_USER0,
-D(4) KM_USER1,
-D(5) KM_BIO_SRC_IRQ,
-D(6) KM_BIO_DST_IRQ,
-D(7) KM_PTE0,
-D(8) KM_PTE1,
-D(9) KM_IRQ0,
-D(10) KM_IRQ1,
-D(11) KM_SOFTIRQ0,
-D(12) KM_SOFTIRQ1,
-D(13) KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
-#undef D
+#undef __WITH_KM_FENCE
#endif /* _ASM_IA64_KMAP_TYPES_H */
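This is the pattern repeated for most architectures in this patch: the per-arch copy of enum km_type is deleted and asm-generic/kmap_types.h is included instead. Architectures that previously generated __KM_FENCE_* debug slots under CONFIG_DEBUG_HIGHMEM keep that behaviour by defining __WITH_KM_FENCE before the include. The generic header presumably looks roughly like this (abridged sketch, not the full file):

#ifndef _ASM_GENERIC_KMAP_TYPES_H
#define _ASM_GENERIC_KMAP_TYPES_H

#ifdef __WITH_KM_FENCE
# define D(n) __KM_FENCE_##n ,          /* extra guard slots for debug */
#else
# define D(n)
#endif

enum km_type {
D(0)    KM_BOUNCE_READ,
D(1)    KM_SKB_SUNRPC_DATA,
        /* ... the same slot list the per-arch headers used to carry ... */
D(13)   KM_TYPE_NR
};

#undef D

#endif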
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index 5b0e830c6f33..c475fc281be7 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -19,10 +19,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial task structure.
*
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index c259b9467fcc..7b30d21c5190 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1829,8 +1829,7 @@ ia64_mca_cpu_init(void *cpu_data)
data = mca_bootmem();
first_time = 0;
} else
- data = page_address(alloc_pages_node(numa_node_id(),
- GFP_KERNEL, get_order(sz)));
+ data = __get_free_pages(GFP_KERNEL, get_order(sz));
if (!data)
panic("Could not allocate MCA memory for cpu %d\n",
cpu);
diff --git a/arch/ia64/kernel/paravirt_patchlist.c b/arch/ia64/kernel/paravirt_patchlist.c
index b28082a95d45..0a70720662ed 100644
--- a/arch/ia64/kernel/paravirt_patchlist.c
+++ b/arch/ia64/kernel/paravirt_patchlist.c
@@ -19,6 +19,8 @@
*/
#include <linux/bug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <asm/paravirt.h>
#define DECLARE(name) \
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 89ad0bbb8614..abce2468a40b 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -5595,7 +5595,7 @@ pfm_interrupt_handler(int irq, void *arg)
(*pfm_alt_intr_handler->handler)(irq, arg, regs);
}
- put_cpu_no_resched();
+ put_cpu();
return IRQ_HANDLED;
}
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index e6ac3c332d17..a595823582d9 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -98,7 +98,8 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
/* attempt to allocate a granule's worth of cached memory pages */
- page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ page = alloc_pages_exact_node(nid,
+ GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
IA64_GRANULE_SHIFT-PAGE_SHIFT);
if (!page) {
mutex_unlock(&uc_pool->add_chunk_mutex);
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index d876423e4e75..98b684928e12 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -90,7 +90,8 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
*/
node = pcibus_to_node(pdev->bus);
if (likely(node >=0)) {
- struct page *p = alloc_pages_node(node, flags, get_order(size));
+ struct page *p = alloc_pages_exact_node(node,
+ flags, get_order(size));
if (likely(p))
cpuaddr = page_address(p);
diff --git a/arch/m32r/include/asm/kmap_types.h b/arch/m32r/include/asm/kmap_types.h
index fa94dc6410ea..4cdb5e3a06bf 100644
--- a/arch/m32r/include/asm/kmap_types.h
+++ b/arch/m32r/include/asm/kmap_types.h
@@ -2,28 +2,11 @@
#define __M32R_KMAP_TYPES_H
#ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define __WITH_KM_FENCE
#endif
-enum km_type {
-D(0) KM_BOUNCE_READ,
-D(1) KM_SKB_SUNRPC_DATA,
-D(2) KM_SKB_DATA_SOFTIRQ,
-D(3) KM_USER0,
-D(4) KM_USER1,
-D(5) KM_BIO_SRC_IRQ,
-D(6) KM_BIO_DST_IRQ,
-D(7) KM_PTE0,
-D(8) KM_PTE1,
-D(9) KM_IRQ0,
-D(10) KM_IRQ1,
-D(11) KM_SOFTIRQ0,
-D(12) KM_SOFTIRQ1,
-D(13) KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
-#undef D
+#undef __WITH_KM_FENCE
#endif /* __M32R_KMAP_TYPES_H */
diff --git a/arch/m32r/kernel/init_task.c b/arch/m32r/kernel/init_task.c
index 016885c6f260..fce57e5d3f91 100644
--- a/arch/m32r/kernel/init_task.c
+++ b/arch/m32r/kernel/init_task.c
@@ -13,10 +13,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial thread structure.
*
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index 7daf897292cf..b7a78ad429b7 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -154,9 +154,9 @@ unsigned long __init zone_sizes_init(void)
* Use all area of internal RAM.
* see __alloc_pages()
*/
- NODE_DATA(1)->node_zones->pages_min = 0;
- NODE_DATA(1)->node_zones->pages_low = 0;
- NODE_DATA(1)->node_zones->pages_high = 0;
+ NODE_DATA(1)->node_zones->watermark[WMARK_MIN] = 0;
+ NODE_DATA(1)->node_zones->watermark[WMARK_LOW] = 0;
+ NODE_DATA(1)->node_zones->watermark[WMARK_HIGH] = 0;
return holes;
}
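The pages_min/pages_low/pages_high fields of struct zone were folded into a single watermark[] array in this kernel cycle, so the m32r discontig code only needs the mechanical rename. A sketch of the replacement interface as assumed here (simplified from mmzone.h):

enum zone_watermarks {
        WMARK_MIN,
        WMARK_LOW,
        WMARK_HIGH,
        NR_WMARK
};

/* in struct zone:      unsigned long watermark[NR_WMARK]; */

#define min_wmark_pages(z)      ((z)->watermark[WMARK_MIN])
#define low_wmark_pages(z)      ((z)->watermark[WMARK_LOW])
#define high_wmark_pages(z)     ((z)->watermark[WMARK_HIGH])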
diff --git a/arch/m32r/platforms/m32104ut/setup.c b/arch/m32r/platforms/m32104ut/setup.c
index 98138b4e9220..922fdfdadeaa 100644
--- a/arch/m32r/platforms/m32104ut/setup.c
+++ b/arch/m32r/platforms/m32104ut/setup.c
@@ -63,7 +63,7 @@ static void shutdown_m32104ut_irq(unsigned int irq)
outl(M32R_ICUCR_ILEVEL7, port);
}
-static struct hw_interrupt_type m32104ut_irq_type =
+static struct irq_chip m32104ut_irq_type =
{
.typename = "M32104UT-IRQ",
.startup = startup_m32104ut_irq,
diff --git a/arch/m32r/platforms/m32700ut/setup.c b/arch/m32r/platforms/m32700ut/setup.c
index 77b0ae9379e9..9c1bc7487c1e 100644
--- a/arch/m32r/platforms/m32700ut/setup.c
+++ b/arch/m32r/platforms/m32700ut/setup.c
@@ -69,7 +69,7 @@ static void shutdown_m32700ut_irq(unsigned int irq)
outl(M32R_ICUCR_ILEVEL7, port);
}
-static struct hw_interrupt_type m32700ut_irq_type =
+static struct irq_chip m32700ut_irq_type =
{
.typename = "M32700UT-IRQ",
.startup = startup_m32700ut_irq,
@@ -146,7 +146,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)
outw(PLD_ICUCR_ILEVEL7, port);
}
-static struct hw_interrupt_type m32700ut_pld_irq_type =
+static struct irq_chip m32700ut_pld_irq_type =
{
.typename = "M32700UT-PLD-IRQ",
.startup = startup_m32700ut_pld_irq,
@@ -215,7 +215,7 @@ static void shutdown_m32700ut_lanpld_irq(unsigned int irq)
outw(PLD_ICUCR_ILEVEL7, port);
}
-static struct hw_interrupt_type m32700ut_lanpld_irq_type =
+static struct irq_chip m32700ut_lanpld_irq_type =
{
.typename = "M32700UT-PLD-LAN-IRQ",
.startup = startup_m32700ut_lanpld_irq,
@@ -284,7 +284,7 @@ static void shutdown_m32700ut_lcdpld_irq(unsigned int irq)
outw(PLD_ICUCR_ILEVEL7, port);
}
-static struct hw_interrupt_type m32700ut_lcdpld_irq_type =
+static struct irq_chip m32700ut_lcdpld_irq_type =
{
.typename = "M32700UT-PLD-LCD-IRQ",
.startup = startup_m32700ut_lcdpld_irq,
diff --git a/arch/m32r/platforms/mappi/setup.c b/arch/m32r/platforms/mappi/setup.c
index 3ec087ff2214..fb4b17799b66 100644
--- a/arch/m32r/platforms/mappi/setup.c
+++ b/arch/m32r/platforms/mappi/setup.c
@@ -63,7 +63,7 @@ static void shutdown_mappi_irq(unsigned int irq)
outl(M32R_ICUCR_ILEVEL7, port);
}
-static struct hw_interrupt_type mappi_irq_type =
+static struct irq_chip mappi_irq_type =
{
.typename = "MAPPI-IRQ",
.startup = startup_mappi_irq,
diff --git a/arch/m32r/platforms/mappi2/setup.c b/arch/m32r/platforms/mappi2/setup.c
index d87969c6356e..6a65eda0a056 100644
--- a/arch/m32r/platforms/mappi2/setup.c
+++ b/arch/m32r/platforms/mappi2/setup.c
@@ -70,7 +70,7 @@ static void shutdown_mappi2_irq(unsigned int irq)
outl(M32R_ICUCR_ILEVEL7, port);
}
-static struct hw_interrupt_type mappi2_irq_type =
+static struct irq_chip mappi2_irq_type =
{
.typename = "MAPPI2-IRQ",
.startup = startup_mappi2_irq,
diff --git a/arch/m32r/platforms/mappi3/setup.c b/arch/m32r/platforms/mappi3/setup.c
index 785b4bd6d9fd..9c337aeac94b 100644
--- a/arch/m32r/platforms/mappi3/setup.c
+++ b/arch/m32r/platforms/mappi3/setup.c
@@ -70,7 +70,7 @@ static void shutdown_mappi3_irq(unsigned int irq)
outl(M32R_ICUCR_ILEVEL7, port);
}
-static struct hw_interrupt_type mappi3_irq_type =
+static struct irq_chip mappi3_irq_type =
{
.typename = "MAPPI3-IRQ",
.startup = startup_mappi3_irq,
diff --git a/arch/m32r/platforms/oaks32r/setup.c b/arch/m32r/platforms/oaks32r/setup.c
index 6faa5db68e95..ed865741c38d 100644
--- a/arch/m32r/platforms/oaks32r/setup.c
+++ b/arch/m32r/platforms/oaks32r/setup.c
@@ -61,7 +61,7 @@ static void shutdown_oaks32r_irq(unsigned int irq)
outl(M32R_ICUCR_ILEVEL7, port);
}
-static struct hw_interrupt_type oaks32r_irq_type =
+static struct irq_chip oaks32r_irq_type =
{
.typename = "OAKS32R-IRQ",
.startup = startup_oaks32r_irq,
diff --git a/arch/m32r/platforms/opsput/setup.c b/arch/m32r/platforms/opsput/setup.c
index fab13fd85422..80d680657019 100644
--- a/arch/m32r/platforms/opsput/setup.c
+++ b/arch/m32r/platforms/opsput/setup.c
@@ -70,7 +70,7 @@ static void shutdown_opsput_irq(unsigned int irq)
outl(M32R_ICUCR_ILEVEL7, port);
}
-static struct hw_interrupt_type opsput_irq_type =
+static struct irq_chip opsput_irq_type =
{
.typename = "OPSPUT-IRQ",
.startup = startup_opsput_irq,
@@ -147,7 +147,7 @@ static void shutdown_opsput_pld_irq(unsigned int irq)
outw(PLD_ICUCR_ILEVEL7, port);
}
-static struct hw_interrupt_type opsput_pld_irq_type =
+static struct irq_chip opsput_pld_irq_type =
{
.typename = "OPSPUT-PLD-IRQ",
.startup = startup_opsput_pld_irq,
@@ -216,7 +216,7 @@ static void shutdown_opsput_lanpld_irq(unsigned int irq)
outw(PLD_ICUCR_ILEVEL7, port);
}
-static struct hw_interrupt_type opsput_lanpld_irq_type =
+static struct irq_chip opsput_lanpld_irq_type =
{
.typename = "OPSPUT-PLD-LAN-IRQ",
.startup = startup_opsput_lanpld_irq,
@@ -285,7 +285,7 @@ static void shutdown_opsput_lcdpld_irq(unsigned int irq)
outw(PLD_ICUCR_ILEVEL7, port);
}
-static struct hw_interrupt_type opsput_lcdpld_irq_type =
+static struct irq_chip opsput_lcdpld_irq_type =
{
"OPSPUT-PLD-LCD-IRQ",
startup_opsput_lcdpld_irq,
diff --git a/arch/m32r/platforms/usrv/setup.c b/arch/m32r/platforms/usrv/setup.c
index 89588d649eb7..757302660af8 100644
--- a/arch/m32r/platforms/usrv/setup.c
+++ b/arch/m32r/platforms/usrv/setup.c
@@ -61,7 +61,7 @@ static void shutdown_mappi_irq(unsigned int irq)
outl(M32R_ICUCR_ILEVEL7, port);
}
-static struct hw_interrupt_type mappi_irq_type =
+static struct irq_chip mappi_irq_type =
{
.typename = "M32700-IRQ",
.startup = startup_mappi_irq,
@@ -134,7 +134,7 @@ static void shutdown_m32700ut_pld_irq(unsigned int irq)
outw(PLD_ICUCR_ILEVEL7, port);
}
-static struct hw_interrupt_type m32700ut_pld_irq_type =
+static struct irq_chip m32700ut_pld_irq_type =
{
.typename = "USRV-PLD-IRQ",
.startup = startup_m32700ut_pld_irq,
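All of the m32r platform files above make the same one-line change: struct hw_interrupt_type is the obsolete name for struct irq_chip, kept alive by a migration define in <linux/irq.h>, so switching to irq_chip does not change the initializers (.typename, .startup, .shutdown and friends stay as they are). The compatibility shim being relied on is roughly:

/* migration helpers from <linux/irq.h> of this era, as assumed here */
#define hw_interrupt_type       irq_chip
typedef struct irq_chip         hw_irq_controller;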
diff --git a/arch/m68k/include/asm/kmap_types.h b/arch/m68k/include/asm/kmap_types.h
index c843c63d3801..3413cc1390ec 100644
--- a/arch/m68k/include/asm/kmap_types.h
+++ b/arch/m68k/include/asm/kmap_types.h
@@ -1,21 +1,6 @@
#ifndef __ASM_M68K_KMAP_TYPES_H
#define __ASM_M68K_KMAP_TYPES_H
-enum km_type {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_PTE0,
- KM_PTE1,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
#endif /* __ASM_M68K_KMAP_TYPES_H */
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index ec37fb56c127..72bad65dba3a 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -42,10 +42,6 @@
*/
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
union thread_union init_thread_union
__attribute__((section(".data.init_task"), aligned(THREAD_SIZE)))
= { INIT_THREAD_INFO(init_task) };
diff --git a/arch/m68knommu/kernel/init_task.c b/arch/m68knommu/kernel/init_task.c
index fe282de1d596..45e97a207fed 100644
--- a/arch/m68knommu/kernel/init_task.c
+++ b/arch/m68knommu/kernel/init_task.c
@@ -14,10 +14,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial task structure.
*
diff --git a/arch/microblaze/include/asm/kmap_types.h b/arch/microblaze/include/asm/kmap_types.h
index 4d7e222f5dd7..25975252d83d 100644
--- a/arch/microblaze/include/asm/kmap_types.h
+++ b/arch/microblaze/include/asm/kmap_types.h
@@ -1,29 +1,6 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
#ifndef _ASM_MICROBLAZE_KMAP_TYPES_H
#define _ASM_MICROBLAZE_KMAP_TYPES_H
-enum km_type {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_PTE0,
- KM_PTE1,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_TYPE_NR,
-};
+#include <asm-generic/kmap_types.h>
#endif /* _ASM_MICROBLAZE_KMAP_TYPES_H */
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index 783da855a2e3..d6d35b2e5fe8 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -963,7 +963,7 @@ CONFIG_EEPROM_LEGACY=y
CONFIG_SENSORS_PCF8574=y
# CONFIG_PCF8575 is not set
CONFIG_SENSORS_PCF8591=y
-CONFIG_SENSORS_MAX6875=y
+CONFIG_EEPROM_MAX6875=y
# CONFIG_SENSORS_TSL2550 is not set
CONFIG_I2C_DEBUG_CORE=y
CONFIG_I2C_DEBUG_ALGO=y
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 8426d3b9501c..fadb351d249b 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -1849,7 +1849,7 @@ CONFIG_EEPROM_LEGACY=m
CONFIG_SENSORS_PCF8574=m
CONFIG_SENSORS_PCA9539=m
CONFIG_SENSORS_PCF8591=m
-CONFIG_SENSORS_MAX6875=m
+CONFIG_EEPROM_MAX6875=m
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
diff --git a/arch/mips/include/asm/i8253.h b/arch/mips/include/asm/i8253.h
index 5dabc870b322..032ca73f181b 100644
--- a/arch/mips/include/asm/i8253.h
+++ b/arch/mips/include/asm/i8253.h
@@ -12,8 +12,6 @@
#define PIT_CH0 0x40
#define PIT_CH2 0x42
-#define PIT_TICK_RATE 1193182UL
-
extern spinlock_t i8253_lock;
extern void setup_pit_timer(void);
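PIT_TICK_RATE is the i8253/i8254 input clock and was duplicated in several arch headers (this hunk plus the alpha and powerpc 8253pit.h removals in the same patch); the series consolidates it into a single generic definition, presumably in <linux/timex.h>. The value itself is just the classic 14.31818 MHz crystal divided by 12:

/* 14318180 Hz / 12 = 1193181.67 Hz, rounded to the usual constant */
#define PIT_TICK_RATE   1193182UL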
diff --git a/arch/mips/include/asm/kmap_types.h b/arch/mips/include/asm/kmap_types.h
index 806aae3c5338..58e91ed0388f 100644
--- a/arch/mips/include/asm/kmap_types.h
+++ b/arch/mips/include/asm/kmap_types.h
@@ -1,30 +1,12 @@
#ifndef _ASM_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H
-
#ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define __WITH_KM_FENCE
#endif
-enum km_type {
-D(0) KM_BOUNCE_READ,
-D(1) KM_SKB_SUNRPC_DATA,
-D(2) KM_SKB_DATA_SOFTIRQ,
-D(3) KM_USER0,
-D(4) KM_USER1,
-D(5) KM_BIO_SRC_IRQ,
-D(6) KM_BIO_DST_IRQ,
-D(7) KM_PTE0,
-D(8) KM_PTE1,
-D(9) KM_IRQ0,
-D(10) KM_IRQ1,
-D(11) KM_SOFTIRQ0,
-D(12) KM_SOFTIRQ1,
-D(13) KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
-#undef D
+#undef __WITH_KM_FENCE
#endif
diff --git a/arch/mips/kernel/init_task.c b/arch/mips/kernel/init_task.c
index 149cd914526e..5b457a40c784 100644
--- a/arch/mips/kernel/init_task.c
+++ b/arch/mips/kernel/init_task.c
@@ -11,10 +11,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial thread structure.
*
diff --git a/arch/mips/sni/eisa.c b/arch/mips/sni/eisa.c
index 7396cd719900..6827feb4de96 100644
--- a/arch/mips/sni/eisa.c
+++ b/arch/mips/sni/eisa.c
@@ -38,7 +38,7 @@ int __init sni_eisa_root_init(void)
if (!r)
return r;
- eisa_root_dev.dev.driver_data = &eisa_bus_root;
+ dev_set_drvdata(&eisa_root_dev.dev, &eisa_bus_root);
if (eisa_root_register(&eisa_bus_root)) {
/* A real bridge may have been registered before
diff --git a/arch/mn10300/include/asm/kmap_types.h b/arch/mn10300/include/asm/kmap_types.h
index 3398f9f35603..76d093b58d4f 100644
--- a/arch/mn10300/include/asm/kmap_types.h
+++ b/arch/mn10300/include/asm/kmap_types.h
@@ -1,31 +1,6 @@
-/* MN10300 kmap_atomic() slot IDs
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
#ifndef _ASM_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H
-enum km_type {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_PTE0,
- KM_PTE1,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
#endif /* _ASM_KMAP_TYPES_H */
diff --git a/arch/mn10300/kernel/init_task.c b/arch/mn10300/kernel/init_task.c
index 5ac3566f8c98..80d423b80af3 100644
--- a/arch/mn10300/kernel/init_task.c
+++ b/arch/mn10300/kernel/init_task.c
@@ -20,9 +20,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial thread structure.
*
diff --git a/arch/parisc/include/asm/kmap_types.h b/arch/parisc/include/asm/kmap_types.h
index 806aae3c5338..58e91ed0388f 100644
--- a/arch/parisc/include/asm/kmap_types.h
+++ b/arch/parisc/include/asm/kmap_types.h
@@ -1,30 +1,12 @@
#ifndef _ASM_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H
-
#ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define __WITH_KM_FENCE
#endif
-enum km_type {
-D(0) KM_BOUNCE_READ,
-D(1) KM_SKB_SUNRPC_DATA,
-D(2) KM_SKB_DATA_SOFTIRQ,
-D(3) KM_USER0,
-D(4) KM_USER1,
-D(5) KM_BIO_SRC_IRQ,
-D(6) KM_BIO_DST_IRQ,
-D(7) KM_PTE0,
-D(8) KM_PTE1,
-D(9) KM_IRQ0,
-D(10) KM_IRQ1,
-D(11) KM_SOFTIRQ0,
-D(12) KM_SOFTIRQ1,
-D(13) KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
-#undef D
+#undef __WITH_KM_FENCE
#endif
diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c
index 1e25a45d64c1..82974b20fc10 100644
--- a/arch/parisc/kernel/init_task.c
+++ b/arch/parisc/kernel/init_task.c
@@ -36,10 +36,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial task structure.
*
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 93a61898b259..9fb344d5a86a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -93,10 +93,6 @@ config GENERIC_HWEIGHT
bool
default y
-config GENERIC_CALIBRATE_DELAY
- bool
- default y
-
config GENERIC_FIND_NEXT_BIT
bool
default y
@@ -129,6 +125,7 @@ config PPC
select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_OPROFILE
select HAVE_SYSCALL_WRAPPERS if PPC64
+ select GENERIC_ATOMIC64 if PPC32
config EARLY_PRINTK
bool
diff --git a/arch/powerpc/boot/install.sh b/arch/powerpc/boot/install.sh
index 51b2387bdba0..98312d169c85 100644
--- a/arch/powerpc/boot/install.sh
+++ b/arch/powerpc/boot/install.sh
@@ -18,6 +18,9 @@
# $5 and more - kernel boot files; zImage*, uImage, cuImage.*, etc.
#
+# Bail with error code if anything goes wrong
+set -e
+
# User may have a custom install script
if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 7d044dfd9236..12dc7c409616 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -1808,7 +1808,7 @@ CONFIG_PCF8575=m
CONFIG_SENSORS_PCA9539=m
CONFIG_SENSORS_PCF8591=m
# CONFIG_TPS65010 is not set
-CONFIG_SENSORS_MAX6875=m
+CONFIG_EEPROM_MAX6875=m
CONFIG_SENSORS_TSL2550=m
CONFIG_MCU_MPC8349EMITX=m
# CONFIG_I2C_DEBUG_CORE is not set
diff --git a/arch/powerpc/include/asm/8253pit.h b/arch/powerpc/include/asm/8253pit.h
index b70d6e53b303..a71c9c1455a7 100644
--- a/arch/powerpc/include/asm/8253pit.h
+++ b/arch/powerpc/include/asm/8253pit.h
@@ -1,10 +1,3 @@
-#ifndef _ASM_POWERPC_8253PIT_H
-#define _ASM_POWERPC_8253PIT_H
-
/*
* 8253/8254 Programmable Interval Timer
*/
-
-#define PIT_TICK_RATE 1193182UL
-
-#endif /* _ASM_POWERPC_8253PIT_H */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index b7d2d07b6f96..4012483b1899 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -470,6 +470,9 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#else /* __powerpc64__ */
+#include <asm-generic/atomic64.h>
+
#endif /* __powerpc64__ */
#include <asm-generic/atomic-long.h>
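32-bit powerpc has no native 64-bit atomics, so the Kconfig hunk above selects GENERIC_ATOMIC64 and the non-__powerpc64__ branch here pulls in asm-generic/atomic64.h. That generic fallback serialises each operation with a spinlock chosen by hashing the variable's address; a heavily simplified sketch of its shape (not the actual lib/atomic64.c code):

#include <linux/spinlock.h>

typedef struct {
        long long counter;
} atomic64_sketch_t;

static DEFINE_SPINLOCK(fallback_lock);  /* the real code uses a small hashed array */

static long long atomic64_add_return_sketch(long long a, atomic64_sketch_t *v)
{
        unsigned long flags;
        long long val;

        spin_lock_irqsave(&fallback_lock, flags);
        val = (v->counter += a);
        spin_unlock_irqrestore(&fallback_lock, flags);
        return val;
}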
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 53512374e1c9..b7f8f4a87cc0 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -80,7 +80,7 @@ static inline void local_irq_disable(void)
__asm__ __volatile__("wrteei 0": : :"memory");
#else
unsigned long msr;
- __asm__ __volatile__("": : :"memory");
+
msr = mfmsr();
SET_MSR_EE(msr & ~MSR_EE);
#endif
@@ -92,7 +92,7 @@ static inline void local_irq_enable(void)
__asm__ __volatile__("wrteei 1": : :"memory");
#else
unsigned long msr;
- __asm__ __volatile__("": : :"memory");
+
msr = mfmsr();
SET_MSR_EE(msr | MSR_EE);
#endif
@@ -108,7 +108,6 @@ static inline void local_irq_save_ptr(unsigned long *flags)
#else
SET_MSR_EE(msr & ~MSR_EE);
#endif
- __asm__ __volatile__("": : :"memory");
}
#define local_save_flags(flags) ((flags) = mfmsr())
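The empty asm volatile("" : : : "memory") statements were the only thing ordering surrounding memory accesses against the MSR update; they can go because the reg.h hunk later in this patch adds a "memory" clobber to mtmsr()/mtmsrd() themselves. The property being preserved is that in code like the following the compiler cannot move the store outside the interrupts-off region:

#include <linux/irqflags.h>

static int shared_flag;

static void update_with_irqs_off(void)
{
        unsigned long flags;

        local_irq_save(flags);
        shared_flag = 1;        /* kept between the two MSR writes by the clobber */
        local_irq_restore(flags);
}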
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 7464c0daddd1..7ead7c16fb7c 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -35,6 +35,16 @@
#define IOMMU_PAGE_MASK (~((1 << IOMMU_PAGE_SHIFT) - 1))
#define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
+/* Cell page table entries */
+#define CBE_IOPTE_PP_W 0x8000000000000000ul /* protection: write */
+#define CBE_IOPTE_PP_R 0x4000000000000000ul /* protection: read */
+#define CBE_IOPTE_M 0x2000000000000000ul /* coherency required */
+#define CBE_IOPTE_SO_R 0x1000000000000000ul /* ordering: writes */
+#define CBE_IOPTE_SO_RW 0x1800000000000000ul /* ordering: r & w */
+#define CBE_IOPTE_RPN_Mask 0x07fffffffffff000ul /* RPN */
+#define CBE_IOPTE_H 0x0000000000000800ul /* cache hint */
+#define CBE_IOPTE_IOID_Mask 0x00000000000007fful /* ioid */
+
/* Boot time flags */
extern int iommu_is_off;
extern int iommu_force_on;
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index cdb6fd814de8..7f065e178ec4 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -53,6 +53,13 @@ enum ps3_param_av_multi_out ps3_os_area_get_av_multi_out(void);
extern u64 ps3_os_area_get_rtc_diff(void);
extern void ps3_os_area_set_rtc_diff(u64 rtc_diff);
+struct ps3_os_area_flash_ops {
+ ssize_t (*read)(void *buf, size_t count, loff_t pos);
+ ssize_t (*write)(const void *buf, size_t count, loff_t pos);
+};
+
+extern void ps3_os_area_flash_register(const struct ps3_os_area_flash_ops *ops);
+
/* dma routines */
enum ps3_dma_page_size {
@@ -418,15 +425,15 @@ static inline struct ps3_system_bus_driver *
* @data: Data to set
*/
-static inline void ps3_system_bus_set_driver_data(
+static inline void ps3_system_bus_set_drvdata(
struct ps3_system_bus_device *dev, void *data)
{
- dev->core.driver_data = data;
+ dev_set_drvdata(&dev->core, data);
}
-static inline void *ps3_system_bus_get_driver_data(
+static inline void *ps3_system_bus_get_drvdata(
struct ps3_system_bus_device *dev)
{
- return dev->core.driver_data;
+ return dev_get_drvdata(&dev->core);
}
/* These two need global scope for get_dma_ops(). */
@@ -520,7 +527,4 @@ void ps3_sync_irq(int node);
u32 ps3_get_hw_thread_id(int cpu);
u64 ps3_get_spe_id(void *arg);
-/* mutex synchronizing GPU accesses and video mode changes */
-extern struct mutex ps3_gpu_mutex;
-
#endif
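ps3_system_bus_{set,get}_driver_data are renamed to the _drvdata form and now go through the dev_set_drvdata()/dev_get_drvdata() accessors instead of touching dev->core.driver_data directly, matching the driver-core direction also visible in the mips/sni and axon_msi hunks. A hedged example of a PS3 system-bus driver using the renamed helpers (the driver structure and names are illustrative):

#include <linux/slab.h>
#include <asm/ps3.h>

struct example_priv {
        void __iomem *regs;
};

static int example_probe(struct ps3_system_bus_device *dev)
{
        struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

        if (!priv)
                return -ENOMEM;
        ps3_system_bus_set_drvdata(dev, priv);
        return 0;
}

static int example_remove(struct ps3_system_bus_device *dev)
{
        kfree(ps3_system_bus_get_drvdata(dev));
        ps3_system_bus_set_drvdata(dev, NULL);
        return 0;
}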
diff --git a/arch/powerpc/include/asm/ps3gpu.h b/arch/powerpc/include/asm/ps3gpu.h
new file mode 100644
index 000000000000..b2b89591907c
--- /dev/null
+++ b/arch/powerpc/include/asm/ps3gpu.h
@@ -0,0 +1,86 @@
+/*
+ * PS3 GPU declarations.
+ *
+ * Copyright 2009 Sony Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ASM_POWERPC_PS3GPU_H
+#define _ASM_POWERPC_PS3GPU_H
+
+#include <linux/mutex.h>
+
+#include <asm/lv1call.h>
+
+
+#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC 0x101
+#define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP 0x102
+
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP 0x600
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT_SYNC 0x602
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_CLOSE 0x603
+
+#define L1GPU_FB_BLIT_WAIT_FOR_COMPLETION (1ULL << 32)
+
+#define L1GPU_DISPLAY_SYNC_HSYNC 1
+#define L1GPU_DISPLAY_SYNC_VSYNC 2
+
+
+/* mutex synchronizing GPU accesses and video mode changes */
+extern struct mutex ps3_gpu_mutex;
+
+
+static inline int lv1_gpu_display_sync(u64 context_handle, u64 head,
+ u64 ddr_offset)
+{
+ return lv1_gpu_context_attribute(context_handle,
+ L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC,
+ head, ddr_offset, 0, 0);
+}
+
+static inline int lv1_gpu_display_flip(u64 context_handle, u64 head,
+ u64 ddr_offset)
+{
+ return lv1_gpu_context_attribute(context_handle,
+ L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP,
+ head, ddr_offset, 0, 0);
+}
+
+static inline int lv1_gpu_fb_setup(u64 context_handle, u64 xdr_lpar,
+ u64 xdr_size, u64 ioif_offset)
+{
+ return lv1_gpu_context_attribute(context_handle,
+ L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP,
+ xdr_lpar, xdr_size, ioif_offset, 0);
+}
+
+static inline int lv1_gpu_fb_blit(u64 context_handle, u64 ddr_offset,
+ u64 ioif_offset, u64 sync_width, u64 pitch)
+{
+ return lv1_gpu_context_attribute(context_handle,
+ L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
+ ddr_offset, ioif_offset, sync_width,
+ pitch);
+}
+
+static inline int lv1_gpu_fb_close(u64 context_handle)
+{
+ return lv1_gpu_context_attribute(context_handle,
+ L1GPU_CONTEXT_ATTRIBUTE_FB_CLOSE, 0,
+ 0, 0, 0);
+}
+
+#endif /* _ASM_POWERPC_PS3GPU_H */
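The new header wraps the L1GPU_CONTEXT_ATTRIBUTE_* operations of lv1_gpu_context_attribute() in named inline helpers and re-homes the ps3_gpu_mutex declaration dropped from ps3.h above. A small illustrative caller (not from this patch) showing the intended locking around the helpers:

#include <asm/ps3gpu.h>

static int example_flip(u64 context_handle, u64 ddr_offset)
{
        int error;

        mutex_lock(&ps3_gpu_mutex);     /* serialise against video mode changes */
        error = lv1_gpu_display_flip(context_handle, 0 /* head */, ddr_offset);
        mutex_unlock(&ps3_gpu_mutex);

        return error;
}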
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index fb359b0a6937..a3c28e46947c 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -745,11 +745,11 @@
asm volatile("mfmsr %0" : "=r" (rval)); rval;})
#ifdef CONFIG_PPC64
#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
- : : "r" (v))
+ : : "r" (v) : "memory")
#define mtmsrd(v) __mtmsrd((v), 0)
#define mtmsr(v) mtmsrd(v)
#else
-#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v))
+#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v) : "memory")
#endif
#define mfspr(rn) ({unsigned long rval; \
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index a0b92de51c7e..370600ca2765 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -325,3 +325,4 @@ SYSCALL(inotify_init1)
SYSCALL_SPU(perf_counter_open)
COMPAT_SYS_SPU(preadv)
COMPAT_SYS_SPU(pwritev)
+COMPAT_SYS(rt_tgsigqueueinfo)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 4badac2d11d1..cef080bfc607 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -344,10 +344,11 @@
#define __NR_perf_counter_open 319
#define __NR_preadv 320
#define __NR_pwritev 321
+#define __NR_rt_tgsigqueueinfo 322
#ifdef __KERNEL__
-#define __NR_syscalls 322
+#define __NR_syscalls 323
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
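Wiring the new syscall on powerpc touches three places in this patch: the COMPAT_SYS(rt_tgsigqueueinfo) entry in systbl.h, the __NR_ number, and the __NR_syscalls bump; the Blackfin sys_call_table hunk earlier does the same for that architecture. From user space the call is reached through syscall(2) until libc grows a wrapper; an illustrative, hypothetical helper:

#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

/* queue a signal plus siginfo payload to one thread of a thread group */
static int my_rt_tgsigqueueinfo(pid_t tgid, pid_t tid, int sig, siginfo_t *info)
{
        return syscall(__NR_rt_tgsigqueueinfo, tgid, tid, sig, info);
}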
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index a7def5f90cad..612b0c4dc26d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -125,6 +125,7 @@ PHONY += systbl_chk
systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i
$(call cmd,systbl_chk)
+ifeq ($(CONFIG_PPC_OF_BOOT_TRAMPOLINE),y)
$(obj)/built-in.o: prom_init_check
quiet_cmd_prom_init_check = CALL $<
@@ -133,5 +134,6 @@ quiet_cmd_prom_init_check = CALL $<
PHONY += prom_init_check
prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o
$(call cmd,prom_init_check)
+endif
clean-files := vmlinux.lds
diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c
index 688b329800bd..ffc4253fef55 100644
--- a/arch/powerpc/kernel/init_task.c
+++ b/arch/powerpc/kernel/init_task.c
@@ -9,10 +9,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial thread structure.
*
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 2f0e64b53642..ef6f64950e9b 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -44,10 +44,7 @@
#include <asm/sections.h>
#include <asm/machdep.h>
-#ifdef CONFIG_LOGO_LINUX_CLUT224
#include <linux/linux_logo.h>
-extern const struct linux_logo logo_linux_clut224;
-#endif
/*
* Properties whose value is longer than this get excluded from our
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f46548e66045..1f6816003ebe 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -424,8 +424,8 @@ void __init setup_system(void)
printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
#endif /* CONFIG_PPC_STD_MMU_64 */
if (PHYSICAL_START > 0)
- printk("physical_start = 0x%lx\n",
- PHYSICAL_START);
+ printk("physical_start = 0x%llx\n",
+ (unsigned long long)PHYSICAL_START);
printk("-----------------------------------------------------\n");
DBG(" <- setup_system()\n");
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index bee1443da763..15391c2ab013 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -52,6 +52,7 @@
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -1143,6 +1144,15 @@ void div128_by_32(u64 dividend_high, u64 dividend_low,
}
+/* We don't need to calibrate delay, we use the CPU timebase for that */
+void calibrate_delay(void)
+{
+ /* Some generic code (such as spinlock debug) use loops_per_jiffy
+ * as the number of __delay(1) in a jiffy, so make it so
+ */
+ loops_per_jiffy = tb_ticks_per_jiffy;
+}
+
static int __init rtc_init(void)
{
struct platform_device *pdev;
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 0ce45c2b42f8..c71498dbf211 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -329,7 +329,7 @@ static struct irq_host_ops msic_host_ops = {
static int axon_msi_shutdown(struct of_device *device)
{
- struct axon_msic *msic = device->dev.platform_data;
+ struct axon_msic *msic = dev_get_drvdata(&device->dev);
u32 tmp;
pr_debug("axon_msi: disabling %s\n",
@@ -416,7 +416,7 @@ static int axon_msi_probe(struct of_device *device,
msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
& MSIC_FIFO_SIZE_MASK;
- device->dev.platform_data = msic;
+ dev_set_drvdata(&device->dev, msic);
ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index bed4690de394..5b34fc211f35 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -100,16 +100,6 @@
#define IOSTE_PS_1M 0x0000000000000005ul /* - 1MB */
#define IOSTE_PS_16M 0x0000000000000007ul /* - 16MB */
-/* Page table entries */
-#define IOPTE_PP_W 0x8000000000000000ul /* protection: write */
-#define IOPTE_PP_R 0x4000000000000000ul /* protection: read */
-#define IOPTE_M 0x2000000000000000ul /* coherency required */
-#define IOPTE_SO_R 0x1000000000000000ul /* ordering: writes */
-#define IOPTE_SO_RW 0x1800000000000000ul /* ordering: r & w */
-#define IOPTE_RPN_Mask 0x07fffffffffff000ul /* RPN */
-#define IOPTE_H 0x0000000000000800ul /* cache hint */
-#define IOPTE_IOID_Mask 0x00000000000007fful /* ioid */
-
/* IOMMU sizing */
#define IO_SEGMENT_SHIFT 28
@@ -193,19 +183,21 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
*/
const unsigned long prot = 0xc48;
base_pte =
- ((prot << (52 + 4 * direction)) & (IOPTE_PP_W | IOPTE_PP_R))
- | IOPTE_M | IOPTE_SO_RW | (window->ioid & IOPTE_IOID_Mask);
+ ((prot << (52 + 4 * direction)) &
+ (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) |
+ CBE_IOPTE_M | CBE_IOPTE_SO_RW |
+ (window->ioid & CBE_IOPTE_IOID_Mask);
#else
- base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW |
- (window->ioid & IOPTE_IOID_Mask);
+ base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
+ CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask);
#endif
if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)))
- base_pte &= ~IOPTE_SO_RW;
+ base_pte &= ~CBE_IOPTE_SO_RW;
io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
- io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
+ io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
mb();
@@ -231,8 +223,9 @@ static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
#else
/* spider bridge does PCI reads after freeing - insert a mapping
* to a scratch page instead of an invalid entry */
- pte = IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW | __pa(window->iommu->pad_page)
- | (window->ioid & IOPTE_IOID_Mask);
+ pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW |
+ __pa(window->iommu->pad_page) |
+ (window->ioid & CBE_IOPTE_IOID_Mask);
#endif
io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
@@ -1001,7 +994,7 @@ static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
addr, ptab, segment, offset);
- ptab[offset] = base_pte | (__pa(addr) & IOPTE_RPN_Mask);
+ ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask);
}
static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
@@ -1016,14 +1009,14 @@ static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
- base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M
- | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);
+ base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
+ (cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask);
if (iommu_fixed_is_weak)
pr_info("IOMMU: Using weak ordering for fixed mapping\n");
else {
pr_info("IOMMU: Using strong ordering for fixed mapping\n");
- base_pte |= IOPTE_SO_RW;
+ base_pte |= CBE_IOPTE_SO_RW;
}
for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 296b5268754e..5e0a191764fc 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -122,8 +122,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
area->nid = nid;
area->order = order;
- area->pages = alloc_pages_node(area->nid, GFP_KERNEL | GFP_THISNODE,
- area->order);
+ area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE,
+ area->order);
if (!area->pages) {
printk(KERN_WARNING "%s: no page on node %d\n",
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 9abd210d87c1..8547e86bfb42 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -752,17 +752,8 @@ static int __init init_spu_base(void)
goto out_unregister_sysdev_class;
}
- if (ret > 0) {
- /*
- * We cannot put the forward declaration in
- * <linux/linux_logo.h> because of conflicting session type
- * conflicts for const and __initdata with different compiler
- * versions
- */
- extern const struct linux_logo logo_spe_clut224;
-
+ if (ret > 0)
fb_append_extra_logo(&logo_spe_clut224, ret);
- }
mutex_lock(&spu_full_list_mutex);
xmon_register_spus(&spu_full_list);
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
index 4543c4bc3a56..c5a87a72057b 100644
--- a/arch/powerpc/platforms/iseries/dt.c
+++ b/arch/powerpc/platforms/iseries/dt.c
@@ -204,7 +204,8 @@ static void __init dt_prop_u32(struct iseries_flat_dt *dt, const char *name,
dt_prop(dt, name, &data, sizeof(u32));
}
-static void __init dt_prop_u64(struct iseries_flat_dt *dt, const char *name,
+static void __init __maybe_unused dt_prop_u64(struct iseries_flat_dt *dt,
+ const char *name,
u64 data)
{
dt_prop(dt, name, &data, sizeof(u64));
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index 3689c2413d24..fef4d5150517 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -267,7 +267,8 @@ static struct pending_event *new_pending_event(void)
return ev;
}
-static int signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd)
+static int __maybe_unused
+signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd)
{
struct pending_event *ev = new_pending_event();
int rc;
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 9a2b6d948610..846eb8b57fd1 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -24,6 +24,7 @@
#include <linux/lmb.h>
#include <asm/firmware.h>
+#include <asm/iommu.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
@@ -605,9 +606,8 @@ static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
r->ioid,
iopte_flag);
if (result) {
- printk(KERN_WARNING "%s:%d: lv1_map_device_dma_region "
- "failed: %s\n", __func__, __LINE__,
- ps3_result(result));
+ pr_warning("%s:%d: lv1_put_iopte failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
goto fail_map;
}
DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
@@ -1001,7 +1001,8 @@ static int dma_sb_region_create_linear(struct ps3_dma_region *r)
if (len > r->len)
len = r->len;
result = dma_sb_map_area(r, virt_addr, len, &tmp,
- IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
+ CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
+ CBE_IOPTE_M);
BUG_ON(result);
}
@@ -1014,7 +1015,8 @@ static int dma_sb_region_create_linear(struct ps3_dma_region *r)
else
len -= map.rm.size - r->offset;
result = dma_sb_map_area(r, virt_addr, len, &tmp,
- IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
+ CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
+ CBE_IOPTE_M);
BUG_ON(result);
}
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
index cf1cd0f8c18f..d6487a9c8019 100644
--- a/arch/powerpc/platforms/ps3/os-area.c
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -226,6 +226,44 @@ static struct property property_av_multi_out = {
.value = &saved_params.av_multi_out,
};
+
+static DEFINE_MUTEX(os_area_flash_mutex);
+
+static const struct ps3_os_area_flash_ops *os_area_flash_ops;
+
+void ps3_os_area_flash_register(const struct ps3_os_area_flash_ops *ops)
+{
+ mutex_lock(&os_area_flash_mutex);
+ os_area_flash_ops = ops;
+ mutex_unlock(&os_area_flash_mutex);
+}
+EXPORT_SYMBOL_GPL(ps3_os_area_flash_register);
+
+static ssize_t os_area_flash_read(void *buf, size_t count, loff_t pos)
+{
+ ssize_t res = -ENODEV;
+
+ mutex_lock(&os_area_flash_mutex);
+ if (os_area_flash_ops)
+ res = os_area_flash_ops->read(buf, count, pos);
+ mutex_unlock(&os_area_flash_mutex);
+
+ return res;
+}
+
+static ssize_t os_area_flash_write(const void *buf, size_t count, loff_t pos)
+{
+ ssize_t res = -ENODEV;
+
+ mutex_lock(&os_area_flash_mutex);
+ if (os_area_flash_ops)
+ res = os_area_flash_ops->write(buf, count, pos);
+ mutex_unlock(&os_area_flash_mutex);
+
+ return res;
+}
+
+
/**
* os_area_set_property - Add or overwrite a saved_params value to the device tree.
*
@@ -352,12 +390,12 @@ static int db_verify(const struct os_area_db *db)
if (memcmp(db->magic_num, OS_AREA_DB_MAGIC_NUM,
sizeof(db->magic_num))) {
pr_debug("%s:%d magic_num failed\n", __func__, __LINE__);
- return -1;
+ return -EINVAL;
}
if (db->version != 1) {
pr_debug("%s:%d version failed\n", __func__, __LINE__);
- return -1;
+ return -EINVAL;
}
return 0;
@@ -578,59 +616,48 @@ static void os_area_db_init(struct os_area_db *db)
*
*/
-static void __maybe_unused update_flash_db(void)
+static int update_flash_db(void)
{
- int result;
- int file;
- off_t offset;
+ const unsigned int buf_len = 8 * OS_AREA_SEGMENT_SIZE;
+ struct os_area_header *header;
ssize_t count;
- static const unsigned int buf_len = 8 * OS_AREA_SEGMENT_SIZE;
- const struct os_area_header *header;
+ int error;
+ loff_t pos;
struct os_area_db* db;
/* Read in header and db from flash. */
- file = sys_open("/dev/ps3flash", O_RDWR, 0);
-
- if (file < 0) {
- pr_debug("%s:%d sys_open failed\n", __func__, __LINE__);
- goto fail_open;
- }
-
header = kmalloc(buf_len, GFP_KERNEL);
-
if (!header) {
- pr_debug("%s:%d kmalloc failed\n", __func__, __LINE__);
- goto fail_malloc;
+ pr_debug("%s: kmalloc failed\n", __func__);
+ return -ENOMEM;
}
- offset = sys_lseek(file, 0, SEEK_SET);
-
- if (offset != 0) {
- pr_debug("%s:%d sys_lseek failed\n", __func__, __LINE__);
- goto fail_header_seek;
+ count = os_area_flash_read(header, buf_len, 0);
+ if (count < 0) {
+ pr_debug("%s: os_area_flash_read failed %zd\n", __func__,
+ count);
+ error = count;
+ goto fail;
}
- count = sys_read(file, (char __user *)header, buf_len);
-
- result = count < OS_AREA_SEGMENT_SIZE || verify_header(header)
- || count < header->db_area_offset * OS_AREA_SEGMENT_SIZE;
-
- if (result) {
- pr_debug("%s:%d verify_header failed\n", __func__, __LINE__);
+ pos = header->db_area_offset * OS_AREA_SEGMENT_SIZE;
+ if (count < OS_AREA_SEGMENT_SIZE || verify_header(header) ||
+ count < pos) {
+ pr_debug("%s: verify_header failed\n", __func__);
dump_header(header);
- goto fail_header;
+ error = -EINVAL;
+ goto fail;
}
/* Now got a good db offset and some maybe good db data. */
- db = (void*)header + header->db_area_offset * OS_AREA_SEGMENT_SIZE;
+ db = (void *)header + pos;
- result = db_verify(db);
-
- if (result) {
- printk(KERN_NOTICE "%s:%d: Verify of flash database failed, "
- "formatting.\n", __func__, __LINE__);
+ error = db_verify(db);
+ if (error) {
+ pr_notice("%s: Verify of flash database failed, formatting.\n",
+ __func__);
dump_db(db);
os_area_db_init(db);
}
@@ -639,29 +666,16 @@ static void __maybe_unused update_flash_db(void)
db_set_64(db, &os_area_db_id_rtc_diff, saved_params.rtc_diff);
- offset = sys_lseek(file, header->db_area_offset * OS_AREA_SEGMENT_SIZE,
- SEEK_SET);
-
- if (offset != header->db_area_offset * OS_AREA_SEGMENT_SIZE) {
- pr_debug("%s:%d sys_lseek failed\n", __func__, __LINE__);
- goto fail_db_seek;
- }
-
- count = sys_write(file, (const char __user *)db,
- sizeof(struct os_area_db));
-
+ count = os_area_flash_write(db, sizeof(struct os_area_db), pos);
if (count < sizeof(struct os_area_db)) {
- pr_debug("%s:%d sys_write failed\n", __func__, __LINE__);
+ pr_debug("%s: os_area_flash_write failed %zd\n", __func__,
+ count);
+ error = count < 0 ? count : -EIO;
}
-fail_db_seek:
-fail_header:
-fail_header_seek:
+fail:
kfree(header);
-fail_malloc:
- sys_close(file);
-fail_open:
- return;
+ return error;
}
/**
@@ -674,11 +688,11 @@ fail_open:
static void os_area_queue_work_handler(struct work_struct *work)
{
struct device_node *node;
+ int error;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
node = of_find_node_by_path("/");
-
if (node) {
os_area_set_property(node, &property_rtc_diff);
of_node_put(node);
@@ -686,12 +700,10 @@ static void os_area_queue_work_handler(struct work_struct *work)
pr_debug("%s:%d of_find_node_by_path failed\n",
__func__, __LINE__);
-#if defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
- update_flash_db();
-#else
- printk(KERN_WARNING "%s:%d: No flash rom driver configured.\n",
- __func__, __LINE__);
-#endif
+ error = update_flash_db();
+ if (error)
+ pr_warning("%s: Could not update FLASH ROM\n", __func__);
+
pr_debug(" <- %s:%d\n", __func__, __LINE__);
}
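update_flash_db() no longer open-codes sys_open()/sys_read()/sys_write() on /dev/ps3flash from kernel context; it now returns proper error codes and goes through whatever backend has registered a struct ps3_os_area_flash_ops (declared in the ps3.h hunk above), with -ENODEV reported while nothing is registered. A hedged sketch of the provider side, with illustrative names (the real registration would live in the ps3flash storage driver):

static ssize_t example_flash_read(void *buf, size_t count, loff_t pos)
{
        /* read 'count' bytes at offset 'pos' from FLASH into 'buf' */
        return count;
}

static ssize_t example_flash_write(const void *buf, size_t count, loff_t pos)
{
        /* write 'count' bytes at offset 'pos' to FLASH from 'buf' */
        return count;
}

static const struct ps3_os_area_flash_ops example_flash_ops = {
        .read   = example_flash_read,
        .write  = example_flash_write,
};

/* from the backend's probe():  ps3_os_area_flash_register(&example_flash_ops); */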
@@ -808,7 +820,7 @@ u64 ps3_os_area_get_rtc_diff(void)
{
return saved_params.rtc_diff;
}
-EXPORT_SYMBOL(ps3_os_area_get_rtc_diff);
+EXPORT_SYMBOL_GPL(ps3_os_area_get_rtc_diff);
/**
* ps3_os_area_set_rtc_diff - Set the rtc diff value.
@@ -824,7 +836,7 @@ void ps3_os_area_set_rtc_diff(u64 rtc_diff)
os_area_queue_work();
}
}
-EXPORT_SYMBOL(ps3_os_area_set_rtc_diff);
+EXPORT_SYMBOL_GPL(ps3_os_area_set_rtc_diff);
/**
* ps3_os_area_get_av_multi_out - Returns the default video mode.
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
index 136aa0637d9c..9a196a88eda7 100644
--- a/arch/powerpc/platforms/ps3/platform.h
+++ b/arch/powerpc/platforms/ps3/platform.h
@@ -232,14 +232,4 @@ int ps3_repository_read_spu_resource_id(unsigned int res_index,
int ps3_repository_read_vuart_av_port(unsigned int *port);
int ps3_repository_read_vuart_sysmgr_port(unsigned int *port);
-/* Page table entries */
-#define IOPTE_PP_W 0x8000000000000000ul /* protection: write */
-#define IOPTE_PP_R 0x4000000000000000ul /* protection: read */
-#define IOPTE_M 0x2000000000000000ul /* coherency required */
-#define IOPTE_SO_R 0x1000000000000000ul /* ordering: writes */
-#define IOPTE_SO_RW 0x1800000000000000ul /* ordering: r & w */
-#define IOPTE_RPN_Mask 0x07fffffffffff000ul /* RPN */
-#define IOPTE_H 0x0000000000000800ul /* cache hint */
-#define IOPTE_IOID_Mask 0x00000000000007fful /* ioid */
-
#endif
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 1a7b5ae0c83e..149bea2ce583 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -32,6 +32,7 @@
#include <asm/udbg.h>
#include <asm/prom.h>
#include <asm/lv1call.h>
+#include <asm/ps3gpu.h>
#include "platform.h"
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 9a73d0238639..9fead0faf38b 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -27,6 +27,7 @@
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/firmware.h>
+#include <asm/iommu.h>
#include "platform.h"
@@ -531,7 +532,8 @@ static void * ps3_alloc_coherent(struct device *_dev, size_t size,
}
result = ps3_dma_map(dev->d_region, virt_addr, size, dma_handle,
- IOPTE_PP_W | IOPTE_PP_R | IOPTE_SO_RW | IOPTE_M);
+ CBE_IOPTE_PP_W | CBE_IOPTE_PP_R |
+ CBE_IOPTE_SO_RW | CBE_IOPTE_M);
if (result) {
pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
@@ -575,7 +577,8 @@ static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page,
result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
&bus_addr,
- IOPTE_PP_R | IOPTE_PP_W | IOPTE_SO_RW | IOPTE_M);
+ CBE_IOPTE_PP_R | CBE_IOPTE_PP_W |
+ CBE_IOPTE_SO_RW | CBE_IOPTE_M);
if (result) {
pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
@@ -596,16 +599,16 @@ static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
u64 iopte_flag;
void *ptr = page_address(page) + offset;
- iopte_flag = IOPTE_M;
+ iopte_flag = CBE_IOPTE_M;
switch (direction) {
case DMA_BIDIRECTIONAL:
- iopte_flag |= IOPTE_PP_R | IOPTE_PP_W | IOPTE_SO_RW;
+ iopte_flag |= CBE_IOPTE_PP_R | CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW;
break;
case DMA_TO_DEVICE:
- iopte_flag |= IOPTE_PP_R | IOPTE_SO_R;
+ iopte_flag |= CBE_IOPTE_PP_R | CBE_IOPTE_SO_R;
break;
case DMA_FROM_DEVICE:
- iopte_flag |= IOPTE_PP_W | IOPTE_SO_RW;
+ iopte_flag |= CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW;
break;
default:
/* not happned */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig

index 99dc3ded6b49..a14dba0e4d67 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -348,6 +348,9 @@ config ARCH_ENABLE_MEMORY_HOTPLUG
config ARCH_ENABLE_MEMORY_HOTREMOVE
def_bool y
+config ARCH_HIBERNATION_POSSIBLE
+ def_bool y if 64BIT
+
source "mm/Kconfig"
comment "I/O subsystem configuration"
@@ -592,6 +595,12 @@ config SECCOMP
endmenu
+menu "Power Management"
+
+source "kernel/power/Kconfig"
+
+endmenu
+
source "net/Kconfig"
config PCMCIA
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 578c61f15a4b..0ff387cebf88 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -88,7 +88,9 @@ LDFLAGS_vmlinux := -e start
head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o
core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \
- arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/
+ arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ \
+ arch/s390/power/
+
libs-y += arch/s390/lib/
drivers-y += drivers/s390/
drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 1dfc7100c7ee..264528e4f58d 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -5,7 +5,7 @@
* Exports appldata_register_ops() and appldata_unregister_ops() for the
* data gathering modules.
*
- * Copyright IBM Corp. 2003, 2008
+ * Copyright IBM Corp. 2003, 2009
*
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
@@ -26,6 +26,8 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>
+#include <linux/suspend.h>
+#include <linux/platform_device.h>
#include <asm/appldata.h>
#include <asm/timer.h>
#include <asm/uaccess.h>
@@ -41,6 +43,9 @@
#define TOD_MICRO 0x01000 /* nr. of TOD clock units
for 1 microsecond */
+
+static struct platform_device *appldata_pdev;
+
/*
* /proc entries (sysctl)
*/
@@ -86,6 +91,7 @@ static atomic_t appldata_expire_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(appldata_timer_lock);
static int appldata_interval = APPLDATA_CPU_INTERVAL;
static int appldata_timer_active;
+static int appldata_timer_suspended = 0;
/*
* Work queue
@@ -475,6 +481,93 @@ void appldata_unregister_ops(struct appldata_ops *ops)
/********************** module-ops management <END> **************************/
+/**************************** suspend / resume *******************************/
+static int appldata_freeze(struct device *dev)
+{
+ struct appldata_ops *ops;
+ int rc;
+ struct list_head *lh;
+
+ get_online_cpus();
+ spin_lock(&appldata_timer_lock);
+ if (appldata_timer_active) {
+ __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
+ appldata_timer_suspended = 1;
+ }
+ spin_unlock(&appldata_timer_lock);
+ put_online_cpus();
+
+ mutex_lock(&appldata_ops_mutex);
+ list_for_each(lh, &appldata_ops_list) {
+ ops = list_entry(lh, struct appldata_ops, list);
+ if (ops->active == 1) {
+ rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
+ (unsigned long) ops->data, ops->size,
+ ops->mod_lvl);
+ if (rc != 0)
+ pr_err("Stopping the data collection for %s "
+ "failed with rc=%d\n", ops->name, rc);
+ }
+ }
+ mutex_unlock(&appldata_ops_mutex);
+ return 0;
+}
+
+static int appldata_restore(struct device *dev)
+{
+ struct appldata_ops *ops;
+ int rc;
+ struct list_head *lh;
+
+ get_online_cpus();
+ spin_lock(&appldata_timer_lock);
+ if (appldata_timer_suspended) {
+ __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
+ appldata_timer_suspended = 0;
+ }
+ spin_unlock(&appldata_timer_lock);
+ put_online_cpus();
+
+ mutex_lock(&appldata_ops_mutex);
+ list_for_each(lh, &appldata_ops_list) {
+ ops = list_entry(lh, struct appldata_ops, list);
+ if (ops->active == 1) {
+ ops->callback(ops->data); // init record
+ rc = appldata_diag(ops->record_nr,
+ APPLDATA_START_INTERVAL_REC,
+ (unsigned long) ops->data, ops->size,
+ ops->mod_lvl);
+ if (rc != 0) {
+ pr_err("Starting the data collection for %s "
+ "failed with rc=%d\n", ops->name, rc);
+ }
+ }
+ }
+ mutex_unlock(&appldata_ops_mutex);
+ return 0;
+}
+
+static int appldata_thaw(struct device *dev)
+{
+ return appldata_restore(dev);
+}
+
+static struct dev_pm_ops appldata_pm_ops = {
+ .freeze = appldata_freeze,
+ .thaw = appldata_thaw,
+ .restore = appldata_restore,
+};
+
+static struct platform_driver appldata_pdrv = {
+ .driver = {
+ .name = "appldata",
+ .owner = THIS_MODULE,
+ .pm = &appldata_pm_ops,
+ },
+};
+/************************* suspend / resume <END> ****************************/
+
+
/******************************* init / exit *********************************/
static void __cpuinit appldata_online_cpu(int cpu)
@@ -531,11 +624,23 @@ static struct notifier_block __cpuinitdata appldata_nb = {
*/
static int __init appldata_init(void)
{
- int i;
+ int i, rc;
+
+ rc = platform_driver_register(&appldata_pdrv);
+ if (rc)
+ return rc;
+ appldata_pdev = platform_device_register_simple("appldata", -1, NULL,
+ 0);
+ if (IS_ERR(appldata_pdev)) {
+ rc = PTR_ERR(appldata_pdev);
+ goto out_driver;
+ }
appldata_wq = create_singlethread_workqueue("appldata");
- if (!appldata_wq)
- return -ENOMEM;
+ if (!appldata_wq) {
+ rc = -ENOMEM;
+ goto out_device;
+ }
get_online_cpus();
for_each_online_cpu(i)
@@ -547,6 +652,12 @@ static int __init appldata_init(void)
appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
return 0;
+
+out_device:
+ platform_device_unregister(appldata_pdev);
+out_driver:
+ platform_driver_unregister(&appldata_pdrv);
+ return rc;
}
__initcall(appldata_init);
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index ba007d8df941..2a5419551176 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -1,11 +1,9 @@
/*
- * include/asm-s390/ccwdev.h
- * include/asm-s390x/ccwdev.h
+ * Copyright IBM Corp. 2002, 2009
*
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Arnd Bergmann <arndb@de.ibm.com>
+ * Author(s): Arnd Bergmann <arndb@de.ibm.com>
*
- * Interface for CCW device drivers
+ * Interface for CCW device drivers
*/
#ifndef _S390_CCWDEV_H_
#define _S390_CCWDEV_H_
@@ -104,6 +102,11 @@ struct ccw_device {
* @set_offline: called when setting device offline
* @notify: notify driver of device state changes
* @shutdown: called at device shutdown
+ * @prepare: prepare for pm state transition
+ * @complete: undo work done in @prepare
+ * @freeze: callback for freezing during hibernation snapshotting
+ * @thaw: undo work done in @freeze
+ * @restore: callback for restoring after hibernation
* @driver: embedded device driver structure
* @name: device driver name
*/
@@ -116,6 +119,11 @@ struct ccw_driver {
int (*set_offline) (struct ccw_device *);
int (*notify) (struct ccw_device *, int);
void (*shutdown) (struct ccw_device *);
+ int (*prepare) (struct ccw_device *);
+ void (*complete) (struct ccw_device *);
+ int (*freeze)(struct ccw_device *);
+ int (*thaw) (struct ccw_device *);
+ int (*restore)(struct ccw_device *);
struct device_driver driver;
char *name;
};
@@ -184,6 +192,7 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
extern struct ccw_device *ccw_device_probe_console(void);
+extern int ccw_device_force_console(void);
// FIXME: these have to go
extern int _ccw_device_get_subchannel_number(struct ccw_device *);
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index a27f68985a79..c79c1e787b86 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -38,6 +38,11 @@ struct ccwgroup_device {
* @set_online: function called when device is set online
* @set_offline: function called when device is set offline
* @shutdown: function called when device is shut down
+ * @prepare: prepare for pm state transition
+ * @complete: undo work done in @prepare
+ * @freeze: callback for freezing during hibernation snapshotting
+ * @thaw: undo work done in @freeze
+ * @restore: callback for restoring after hibernation
* @driver: embedded driver structure
*/
struct ccwgroup_driver {
@@ -51,6 +56,11 @@ struct ccwgroup_driver {
int (*set_online) (struct ccwgroup_device *);
int (*set_offline) (struct ccwgroup_device *);
void (*shutdown)(struct ccwgroup_device *);
+ int (*prepare) (struct ccwgroup_device *);
+ void (*complete) (struct ccwgroup_device *);
+ int (*freeze)(struct ccwgroup_device *);
+ int (*thaw) (struct ccwgroup_device *);
+ int (*restore)(struct ccwgroup_device *);
struct device_driver driver;
};
diff --git a/arch/s390/include/asm/kmap_types.h b/arch/s390/include/asm/kmap_types.h
index fd1574648223..94ec3ee07983 100644
--- a/arch/s390/include/asm/kmap_types.h
+++ b/arch/s390/include/asm/kmap_types.h
@@ -2,22 +2,7 @@
#ifndef _ASM_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H
-enum km_type {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_PTE0,
- KM_PTE1,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
#endif
#endif /* __KERNEL__ */
diff --git a/arch/s390/include/asm/suspend.h b/arch/s390/include/asm/suspend.h
new file mode 100644
index 000000000000..dc75c616eafe
--- /dev/null
+++ b/arch/s390/include/asm/suspend.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_S390_SUSPEND_H
+#define __ASM_S390_SUSPEND_H
+
+static inline int arch_prepare_suspend(void)
+{
+ return 0;
+}
+
+#endif
+
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 3a8b26eb1f2e..4fb83c1cdb77 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -1,11 +1,7 @@
/*
- * include/asm-s390/system.h
+ * Copyright IBM Corp. 1999, 2009
*
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *
- * Derived from "include/asm-i386/system.h"
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#ifndef __ASM_SYSTEM_H
@@ -469,6 +465,20 @@ extern psw_t sysc_restore_trace_psw;
extern psw_t io_restore_trace_psw;
#endif
+static inline int tprot(unsigned long addr)
+{
+ int rc = -EFAULT;
+
+ asm volatile(
+ " tprot 0(%1),0\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "+d" (rc) : "a" (addr) : "cc");
+ return rc;
+}
+
#endif /* __KERNEL__ */
#endif
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index fb263736826c..f9b144049dc9 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -1,7 +1,7 @@
/*
* arch/s390/kernel/early.c
*
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007, 2009
* Author(s): Hongjie Yang <hongjie@us.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>
*/
@@ -210,7 +210,7 @@ static noinline __init void detect_machine_type(void)
machine_flags |= MACHINE_FLAG_VM;
}
-static __init void early_pgm_check_handler(void)
+static void early_pgm_check_handler(void)
{
unsigned long addr;
const struct exception_table_entry *fixup;
@@ -222,7 +222,7 @@ static __init void early_pgm_check_handler(void)
S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
}
-static noinline __init void setup_lowcore_early(void)
+void setup_lowcore_early(void)
{
psw_t psw;
diff --git a/arch/s390/kernel/init_task.c b/arch/s390/kernel/init_task.c
index 7db95c0b8693..fe787f9e5f3f 100644
--- a/arch/s390/kernel/init_task.c
+++ b/arch/s390/kernel/init_task.c
@@ -18,10 +18,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial thread structure.
*
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
index 9872999c66d1..559af0d07878 100644
--- a/arch/s390/kernel/mem_detect.c
+++ b/arch/s390/kernel/mem_detect.c
@@ -1,6 +1,7 @@
/*
- * Copyright IBM Corp. 2008
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Copyright IBM Corp. 2008, 2009
+ *
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/kernel.h>
@@ -9,20 +10,6 @@
#include <asm/sclp.h>
#include <asm/setup.h>
-static inline int tprot(unsigned long addr)
-{
- int rc = -EFAULT;
-
- asm volatile(
- " tprot 0(%1),0\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+d" (rc) : "a" (addr) : "cc");
- return rc;
-}
-
#define ADDR2G (1ULL << 31)
static void find_memory_chunks(struct mem_chunk chunk[])
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index cc8c484984e3..fd8e3111a4e8 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,7 +1,7 @@
/*
* arch/s390/kernel/smp.c
*
- * Copyright IBM Corp. 1999,2007
+ * Copyright IBM Corp. 1999, 2009
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
* Heiko Carstens (heiko.carstens@de.ibm.com)
@@ -1031,6 +1031,42 @@ out:
static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
dispatching_store);
+/*
+ * If the resume kernel runs on another cpu than the suspended kernel,
+ * we have to switch the cpu IDs in the logical map.
+ */
+void smp_switch_boot_cpu_in_resume(u32 resume_phys_cpu_id,
+ struct _lowcore *suspend_lowcore)
+{
+ int cpu, suspend_cpu_id, resume_cpu_id;
+ u32 suspend_phys_cpu_id;
+
+ suspend_phys_cpu_id = __cpu_logical_map[suspend_lowcore->cpu_nr];
+ suspend_cpu_id = suspend_lowcore->cpu_nr;
+
+ for_each_present_cpu(cpu) {
+ if (__cpu_logical_map[cpu] == resume_phys_cpu_id) {
+ resume_cpu_id = cpu;
+ goto found;
+ }
+ }
+ panic("Could not find resume cpu in logical map.\n");
+
+found:
+ printk("Resume cpu ID: %i/%i\n", resume_phys_cpu_id, resume_cpu_id);
+ printk("Suspend cpu ID: %i/%i\n", suspend_phys_cpu_id, suspend_cpu_id);
+
+ __cpu_logical_map[resume_cpu_id] = suspend_phys_cpu_id;
+ __cpu_logical_map[suspend_cpu_id] = resume_phys_cpu_id;
+
+ lowcore_ptr[suspend_cpu_id]->cpu_addr = resume_phys_cpu_id;
+}
+
+u32 smp_get_phys_cpu_id(void)
+{
+ return __cpu_logical_map[smp_processor_id()];
+}
+
static int __init topology_init(void)
{
int cpu;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 4ca8e826bf30..565667207985 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -313,3 +313,22 @@ int s390_enable_sie(void)
return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+#ifdef CONFIG_HIBERNATION
+bool kernel_page_present(struct page *page)
+{
+ unsigned long addr;
+ int cc;
+
+ addr = page_to_phys(page);
+ asm("lra %1,0(%1)\n"
+ "ipm %0\n"
+ "srl %0,28"
+ :"=d"(cc),"+a"(addr)::"cc");
+ return cc == 0;
+}
+
+#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+
diff --git a/arch/s390/power/Makefile b/arch/s390/power/Makefile
new file mode 100644
index 000000000000..973bb45a8fec
--- /dev/null
+++ b/arch/s390/power/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for s390 PM support
+#
+
+obj-$(CONFIG_HIBERNATION) += suspend.o
+obj-$(CONFIG_HIBERNATION) += swsusp.o
+obj-$(CONFIG_HIBERNATION) += swsusp_64.o
+obj-$(CONFIG_HIBERNATION) += swsusp_asm64.o
diff --git a/arch/s390/power/suspend.c b/arch/s390/power/suspend.c
new file mode 100644
index 000000000000..b3351eceebbe
--- /dev/null
+++ b/arch/s390/power/suspend.c
@@ -0,0 +1,40 @@
+/*
+ * Suspend support specific for s390.
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/suspend.h>
+#include <linux/reboot.h>
+#include <linux/pfn.h>
+#include <asm/sections.h>
+#include <asm/ipl.h>
+
+/*
+ * References to section boundaries
+ */
+extern const void __nosave_begin, __nosave_end;
+
+/*
+ * check if given pfn is in the 'nosave' or in the read-only NSS section
+ */
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
+ unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end))
+ >> PAGE_SHIFT;
+ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
+ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
+
+ if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
+ return 1;
+ if (pfn >= stext_pfn && pfn <= eshared_pfn) {
+ if (ipl_info.type == IPL_TYPE_NSS)
+ return 1;
+ } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0))
+ return 1;
+ return 0;
+}
diff --git a/arch/s390/power/swsusp.c b/arch/s390/power/swsusp.c
new file mode 100644
index 000000000000..e6a4fe9f5f24
--- /dev/null
+++ b/arch/s390/power/swsusp.c
@@ -0,0 +1,30 @@
+/*
+ * Support for suspend and resume on s390
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ *
+ */
+
+
+/*
+ * save CPU registers before creating a hibernation image and before
+ * restoring the memory state from it
+ */
+void save_processor_state(void)
+{
+ /* implementation contained in the
+ * swsusp_arch_suspend function
+ */
+}
+
+/*
+ * restore the contents of CPU registers
+ */
+void restore_processor_state(void)
+{
+ /* implementation contained in the
+ * swsusp_arch_resume function
+ */
+}
diff --git a/arch/s390/power/swsusp_64.c b/arch/s390/power/swsusp_64.c
new file mode 100644
index 000000000000..9516a517d72f
--- /dev/null
+++ b/arch/s390/power/swsusp_64.c
@@ -0,0 +1,17 @@
+/*
+ * Support for suspend and resume on s390
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ *
+ */
+
+#include <asm/system.h>
+#include <linux/interrupt.h>
+
+void do_after_copyback(void)
+{
+ mb();
+}
+
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/power/swsusp_asm64.S
new file mode 100644
index 000000000000..3c74e7d827c9
--- /dev/null
+++ b/arch/s390/power/swsusp_asm64.S
@@ -0,0 +1,199 @@
+/*
+ * S390 64-bit swsusp implementation
+ *
+ * Copyright IBM Corp. 2009
+ *
+ * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
+ * Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * Save register context in absolute 0 lowcore and call swsusp_save() to
+ * create in-memory kernel image. The context is saved in the designated
+ * "store status" memory locations (see POP).
+ * We return from this function twice. The first time during the suspend to
+ * disk process. The second time via the swsusp_arch_resume() function
+ * (see below) in the resume process.
+ * This function runs with disabled interrupts.
+ */
+ .section .text
+ .align 2
+ .globl swsusp_arch_suspend
+swsusp_arch_suspend:
+ stmg %r6,%r15,__SF_GPRS(%r15)
+ lgr %r1,%r15
+ aghi %r15,-STACK_FRAME_OVERHEAD
+ stg %r1,__SF_BACKCHAIN(%r15)
+
+ /* Deactivate DAT */
+ stnsm __SF_EMPTY(%r15),0xfb
+
+ /* Switch off lowcore protection */
+ stctg %c0,%c0,__SF_EMPTY(%r15)
+ ni __SF_EMPTY+4(%r15),0xef
+ lctlg %c0,%c0,__SF_EMPTY(%r15)
+
+ /* Store prefix register on stack */
+ stpx __SF_EMPTY(%r15)
+
+ /* Setup base register for lowcore (absolute 0) */
+ llgf %r1,__SF_EMPTY(%r15)
+
+ /* Get pointer to save area */
+ aghi %r1,0x1000
+
+ /* Store registers */
+ mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */
+ stfpc 0x31c(%r1) /* store fpu control */
+ std 0,0x200(%r1) /* store f0 */
+ std 1,0x208(%r1) /* store f1 */
+ std 2,0x210(%r1) /* store f2 */
+ std 3,0x218(%r1) /* store f3 */
+ std 4,0x220(%r1) /* store f4 */
+ std 5,0x228(%r1) /* store f5 */
+ std 6,0x230(%r1) /* store f6 */
+ std 7,0x238(%r1) /* store f7 */
+ std 8,0x240(%r1) /* store f8 */
+ std 9,0x248(%r1) /* store f9 */
+ std 10,0x250(%r1) /* store f10 */
+ std 11,0x258(%r1) /* store f11 */
+ std 12,0x260(%r1) /* store f12 */
+ std 13,0x268(%r1) /* store f13 */
+ std 14,0x270(%r1) /* store f14 */
+ std 15,0x278(%r1) /* store f15 */
+ stam %a0,%a15,0x340(%r1) /* store access registers */
+ stctg %c0,%c15,0x380(%r1) /* store control registers */
+ stmg %r0,%r15,0x280(%r1) /* store general registers */
+
+ stpt 0x328(%r1) /* store timer */
+ stckc 0x330(%r1) /* store clock comparator */
+
+ /* Activate DAT */
+ stosm __SF_EMPTY(%r15),0x04
+
+ /* Set prefix page to zero */
+ xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
+ spx __SF_EMPTY(%r15)
+
+ /* Setup lowcore */
+ brasl %r14,setup_lowcore_early
+
+ /* Save image */
+ brasl %r14,swsusp_save
+
+ /* Switch on lowcore protection */
+ stctg %c0,%c0,__SF_EMPTY(%r15)
+ oi __SF_EMPTY+4(%r15),0x10
+ lctlg %c0,%c0,__SF_EMPTY(%r15)
+
+ /* Restore prefix register and return */
+ lghi %r1,0x1000
+ spx 0x318(%r1)
+ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+ lghi %r2,0
+ br %r14
+
+/*
+ * Restore saved memory image to correct place and restore register context.
+ * Then we return to the function that called swsusp_arch_suspend().
+ * swsusp_arch_resume() runs with disabled interrupts.
+ */
+ .globl swsusp_arch_resume
+swsusp_arch_resume:
+ stmg %r6,%r15,__SF_GPRS(%r15)
+ lgr %r1,%r15
+ aghi %r15,-STACK_FRAME_OVERHEAD
+ stg %r1,__SF_BACKCHAIN(%r15)
+
+ /* Save boot cpu number */
+ brasl %r14,smp_get_phys_cpu_id
+ lgr %r10,%r2
+
+ /* Deactivate DAT */
+ stnsm __SF_EMPTY(%r15),0xfb
+
+ /* Switch off lowcore protection */
+ stctg %c0,%c0,__SF_EMPTY(%r15)
+ ni __SF_EMPTY+4(%r15),0xef
+ lctlg %c0,%c0,__SF_EMPTY(%r15)
+
+ /* Set prefix page to zero */
+ xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
+ spx __SF_EMPTY(%r15)
+
+ /* Restore saved image */
+ larl %r1,restore_pblist
+ lg %r1,0(%r1)
+ ltgr %r1,%r1
+ jz 2f
+0:
+ lg %r2,8(%r1)
+ lg %r4,0(%r1)
+ lghi %r3,PAGE_SIZE
+ lghi %r5,PAGE_SIZE
+1:
+ mvcle %r2,%r4,0
+ jo 1b
+ lg %r1,16(%r1)
+ ltgr %r1,%r1
+ jnz 0b
+2:
+ ptlb /* flush tlb */
+
+ /* Restore registers */
+ lghi %r13,0x1000 /* %r13 = pointer to save area */
+
+ spt 0x328(%r13) /* reprogram timer */
+ //sckc 0x330(%r13) /* set clock comparator */
+
+ lctlg %c0,%c15,0x380(%r13) /* load control registers */
+ lam %a0,%a15,0x340(%r13) /* load access registers */
+
+ lfpc 0x31c(%r13) /* load fpu control */
+ ld 0,0x200(%r13) /* load f0 */
+ ld 1,0x208(%r13) /* load f1 */
+ ld 2,0x210(%r13) /* load f2 */
+ ld 3,0x218(%r13) /* load f3 */
+ ld 4,0x220(%r13) /* load f4 */
+ ld 5,0x228(%r13) /* load f5 */
+ ld 6,0x230(%r13) /* load f6 */
+ ld 7,0x238(%r13) /* load f7 */
+ ld 8,0x240(%r13) /* load f8 */
+ ld 9,0x248(%r13) /* load f9 */
+ ld 10,0x250(%r13) /* load f10 */
+ ld 11,0x258(%r13) /* load f11 */
+ ld 12,0x260(%r13) /* load f12 */
+ ld 13,0x268(%r13) /* load f13 */
+ ld 14,0x270(%r13) /* load f14 */
+ ld 15,0x278(%r13) /* load f15 */
+
+ /* Load old stack */
+ lg %r15,0x2f8(%r13)
+
+ /* Pointer to save area */
+ lghi %r13,0x1000
+
+ /* Switch CPUs */
+ lgr %r2,%r10 /* get cpu id */
+ llgf %r3,0x318(%r13)
+ brasl %r14,smp_switch_boot_cpu_in_resume
+
+ /* Restore prefix register */
+ spx 0x318(%r13)
+
+ /* Switch on lowcore protection */
+ stctg %c0,%c0,__SF_EMPTY(%r15)
+ oi __SF_EMPTY+4(%r15),0x10
+ lctlg %c0,%c0,__SF_EMPTY(%r15)
+
+ /* Activate DAT */
+ stosm __SF_EMPTY(%r15),0x04
+
+ /* Return 0 */
+ lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
+ lghi %r2,0
+ br %r14
diff --git a/arch/sh/include/asm/kmap_types.h b/arch/sh/include/asm/kmap_types.h
index 84d565c696be..5962b08b6dd8 100644
--- a/arch/sh/include/asm/kmap_types.h
+++ b/arch/sh/include/asm/kmap_types.h
@@ -3,30 +3,12 @@
/* Dummy header just to define km_type. */
-
#ifdef CONFIG_DEBUG_HIGHMEM
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define __WITH_KM_FENCE
#endif
-enum km_type {
-D(0) KM_BOUNCE_READ,
-D(1) KM_SKB_SUNRPC_DATA,
-D(2) KM_SKB_DATA_SOFTIRQ,
-D(3) KM_USER0,
-D(4) KM_USER1,
-D(5) KM_BIO_SRC_IRQ,
-D(6) KM_BIO_DST_IRQ,
-D(7) KM_PTE0,
-D(8) KM_PTE1,
-D(9) KM_IRQ0,
-D(10) KM_IRQ1,
-D(11) KM_SOFTIRQ0,
-D(12) KM_SOFTIRQ1,
-D(13) KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
-#undef D
+#undef __WITH_KM_FENCE
#endif
diff --git a/arch/sh/kernel/init_task.c b/arch/sh/kernel/init_task.c
index 80c35ff71d56..1719957c0a69 100644
--- a/arch/sh/kernel/init_task.c
+++ b/arch/sh/kernel/init_task.c
@@ -10,9 +10,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct pt_regs fake_swapper_regs;
-struct mm_struct init_mm = INIT_MM(init_mm);
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial thread structure.
*
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index cc12cd48bbc5..3f8b6a92eabd 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -37,6 +37,8 @@ config SPARC64
select HAVE_KPROBES
select HAVE_LMB
select HAVE_SYSCALL_WRAPPERS
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD
select USE_GENERIC_SMP_HELPERS if SMP
select RTC_DRV_CMOS
select RTC_DRV_BQ4802
@@ -93,6 +95,9 @@ config AUDIT_ARCH
config HAVE_SETUP_PER_CPU_AREA
def_bool y if SPARC64
+config HAVE_DYNAMIC_PER_CPU_AREA
+ def_bool y if SPARC64
+
config GENERIC_HARDIRQS_NO__DO_IRQ
bool
def_bool y if SPARC64
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index b5d63bd8716e..0123a4c596ce 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30-rc2
-# Fri Apr 17 02:03:07 2009
+# Linux kernel version: 2.6.30
+# Tue Jun 16 04:59:36 2009
#
CONFIG_64BIT=y
CONFIG_SPARC=y
@@ -19,6 +19,7 @@ CONFIG_LOCKDEP_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
CONFIG_AUDIT_ARCH=y
CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_HAVE_DYNAMIC_PER_CPU_AREA=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_MMU=y
CONFIG_ARCH_NO_VIRT_TO_BUS=y
@@ -82,7 +83,6 @@ CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
@@ -95,16 +95,21 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+
+#
+# Performance Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
CONFIG_PCI_QUIRKS=y
CONFIG_SLUB_DEBUG=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
-# CONFIG_MARKERS is not set
+CONFIG_MARKERS=y
CONFIG_OPROFILE=m
CONFIG_HAVE_OPROFILE=y
CONFIG_KPROBES=y
@@ -202,6 +207,7 @@ CONFIG_NR_QUICK=1
CONFIG_UNEVICTABLE_LRU=y
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=8192
CONFIG_SCHED_SMT=y
CONFIG_SCHED_MC=y
# CONFIG_PREEMPT_NONE is not set
@@ -321,6 +327,7 @@ CONFIG_VLAN_8021Q=m
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
# CONFIG_DCB is not set
@@ -340,7 +347,11 @@ CONFIG_WIRELESS=y
CONFIG_WIRELESS_OLD_REGULATORY=y
# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
-# CONFIG_MAC80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
+CONFIG_MAC80211_DEFAULT_PS_VALUE=0
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -364,6 +375,7 @@ CONFIG_EXTRA_FIRMWARE=""
CONFIG_CONNECTOR=m
# CONFIG_MTD is not set
CONFIG_OF_DEVICE=y
+CONFIG_OF_MDIO=m
# CONFIG_PARPORT is not set
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_FD is not set
@@ -399,6 +411,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_EEPROM_AT24 is not set
# CONFIG_EEPROM_LEGACY is not set
# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
CONFIG_HAVE_IDE=y
CONFIG_IDE=y
@@ -477,10 +490,6 @@ CONFIG_BLK_DEV_SR=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_LOGGING is not set
@@ -499,6 +508,7 @@ CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
# CONFIG_SCSI_CXGB3_ISCSI is not set
+# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
@@ -507,6 +517,7 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_AIC7XXX_OLD is not set
# CONFIG_SCSI_AIC79XX is not set
# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
# CONFIG_SCSI_ARCMSR is not set
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
@@ -521,7 +532,6 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_IPS is not set
# CONFIG_SCSI_INITIO is not set
# CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_MVSAS is not set
# CONFIG_SCSI_STEX is not set
# CONFIG_SCSI_SYM53C8XX_2 is not set
# CONFIG_SCSI_QLOGIC_1280 is not set
@@ -569,7 +579,6 @@ CONFIG_DM_ZERO=m
# CONFIG_IEEE1394 is not set
# CONFIG_I2O is not set
CONFIG_NETDEVICES=y
-CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
@@ -635,6 +644,7 @@ CONFIG_NET_PCI=y
# CONFIG_SMSC9420 is not set
# CONFIG_SUNDANCE is not set
# CONFIG_TLAN is not set
+# CONFIG_KS8842 is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_SC92031 is not set
# CONFIG_ATL2 is not set
@@ -1127,6 +1137,11 @@ CONFIG_SND_VERBOSE_PROCFS=y
# CONFIG_SND_VERBOSE_PRINTK is not set
# CONFIG_SND_DEBUG is not set
CONFIG_SND_VMASTER=y
+CONFIG_SND_RAWMIDI_SEQ=m
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
CONFIG_SND_MPU401_UART=m
CONFIG_SND_AC97_CODEC=m
CONFIG_SND_DRIVERS=y
@@ -1153,6 +1168,7 @@ CONFIG_SND_ALI5451=m
# CONFIG_SND_OXYGEN is not set
# CONFIG_SND_CS4281 is not set
# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CTXFI is not set
# CONFIG_SND_DARLA20 is not set
# CONFIG_SND_GINA20 is not set
# CONFIG_SND_LAYLA20 is not set
@@ -1183,6 +1199,7 @@ CONFIG_SND_ALI5451=m
# CONFIG_SND_INTEL8X0 is not set
# CONFIG_SND_INTEL8X0M is not set
# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_LX6464ES is not set
# CONFIG_SND_MAESTRO3 is not set
# CONFIG_SND_MIXART is not set
# CONFIG_SND_NM256 is not set
@@ -1229,6 +1246,7 @@ CONFIG_HID_BELKIN=y
CONFIG_HID_CHERRY=y
CONFIG_HID_CHICONY=y
CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
# CONFIG_DRAGONRISE_FF is not set
CONFIG_HID_EZKEY=y
CONFIG_HID_KYE=y
@@ -1246,9 +1264,14 @@ CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_SMARTJOYPLUS=y
+# CONFIG_SMARTJOYPLUS_FF is not set
CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
# CONFIG_THRUSTMASTER_FF is not set
+CONFIG_HID_ZEROPLUS=y
# CONFIG_ZEROPLUS_FF is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
@@ -1462,6 +1485,7 @@ CONFIG_FILE_LOCKING=y
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -1636,25 +1660,28 @@ CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_DEBUG_PAGEALLOC is not set
CONFIG_NOP_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_RING_BUFFER=y
+CONFIG_EVENT_TRACING=y
+CONFIG_CONTEXT_SWITCH_TRACER=y
CONFIG_TRACING=y
+CONFIG_GENERIC_TRACER=y
CONFIG_TRACING_SUPPORT=y
-
-#
-# Tracers
-#
+CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_EVENT_TRACER is not set
# CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
CONFIG_BLK_DEV_IO_TRACE=y
# CONFIG_FTRACE_STARTUP_TEST is not set
+# CONFIG_RING_BUFFER_BENCHMARK is not set
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index a11b89ee9ef8..926397d345ff 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -6,9 +6,6 @@
#ifndef _SPARC64_CPUDATA_H
#define _SPARC64_CPUDATA_H
-#include <asm/hypervisor.h>
-#include <asm/asi.h>
-
#ifndef __ASSEMBLY__
#include <linux/percpu.h>
@@ -38,202 +35,10 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
#define local_cpu_data() __get_cpu_var(__cpu_data)
-/* Trap handling code needs to get at a few critical values upon
- * trap entry and to process TSB misses. These cannot be in the
- * per_cpu() area as we really need to lock them into the TLB and
- * thus make them part of the main kernel image. As a result we
- * try to make this as small as possible.
- *
- * This is padded out and aligned to 64-bytes to avoid false sharing
- * on SMP.
- */
-
-/* If you modify the size of this structure, please update
- * TRAP_BLOCK_SZ_SHIFT below.
- */
-struct thread_info;
-struct trap_per_cpu {
-/* D-cache line 1: Basic thread information, cpu and device mondo queues */
- struct thread_info *thread;
- unsigned long pgd_paddr;
- unsigned long cpu_mondo_pa;
- unsigned long dev_mondo_pa;
-
-/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
- unsigned long resum_mondo_pa;
- unsigned long resum_kernel_buf_pa;
- unsigned long nonresum_mondo_pa;
- unsigned long nonresum_kernel_buf_pa;
-
-/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
- struct hv_fault_status fault_info;
-
-/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
- unsigned long cpu_mondo_block_pa;
- unsigned long cpu_list_pa;
- unsigned long tsb_huge;
- unsigned long tsb_huge_temp;
-
-/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
- unsigned long irq_worklist_pa;
- unsigned int cpu_mondo_qmask;
- unsigned int dev_mondo_qmask;
- unsigned int resum_qmask;
- unsigned int nonresum_qmask;
- void *hdesc;
-} __attribute__((aligned(64)));
-extern struct trap_per_cpu trap_block[NR_CPUS];
-extern void init_cur_cpu_trap(struct thread_info *);
-extern void setup_tba(void);
-extern int ncpus_probed;
extern const struct seq_operations cpuinfo_op;
-extern unsigned long real_hard_smp_processor_id(void);
-
-struct cpuid_patch_entry {
- unsigned int addr;
- unsigned int cheetah_safari[4];
- unsigned int cheetah_jbus[4];
- unsigned int starfire[4];
- unsigned int sun4v[4];
-};
-extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
-
-struct sun4v_1insn_patch_entry {
- unsigned int addr;
- unsigned int insn;
-};
-extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
- __sun4v_1insn_patch_end;
-
-struct sun4v_2insn_patch_entry {
- unsigned int addr;
- unsigned int insns[2];
-};
-extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
- __sun4v_2insn_patch_end;
-
#endif /* !(__ASSEMBLY__) */
-#define TRAP_PER_CPU_THREAD 0x00
-#define TRAP_PER_CPU_PGD_PADDR 0x08
-#define TRAP_PER_CPU_CPU_MONDO_PA 0x10
-#define TRAP_PER_CPU_DEV_MONDO_PA 0x18
-#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20
-#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28
-#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30
-#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38
-#define TRAP_PER_CPU_FAULT_INFO 0x40
-#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0
-#define TRAP_PER_CPU_CPU_LIST_PA 0xc8
-#define TRAP_PER_CPU_TSB_HUGE 0xd0
-#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8
-#define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0
-#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8
-#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec
-#define TRAP_PER_CPU_RESUM_QMASK 0xf0
-#define TRAP_PER_CPU_NONRESUM_QMASK 0xf4
-
-#define TRAP_BLOCK_SZ_SHIFT 8
-
-#include <asm/scratchpad.h>
-
-#define __GET_CPUID(REG) \
- /* Spitfire implementation (default). */ \
-661: ldxa [%g0] ASI_UPA_CONFIG, REG; \
- srlx REG, 17, REG; \
- and REG, 0x1f, REG; \
- nop; \
- .section .cpuid_patch, "ax"; \
- /* Instruction location. */ \
- .word 661b; \
- /* Cheetah Safari implementation. */ \
- ldxa [%g0] ASI_SAFARI_CONFIG, REG; \
- srlx REG, 17, REG; \
- and REG, 0x3ff, REG; \
- nop; \
- /* Cheetah JBUS implementation. */ \
- ldxa [%g0] ASI_JBUS_CONFIG, REG; \
- srlx REG, 17, REG; \
- and REG, 0x1f, REG; \
- nop; \
- /* Starfire implementation. */ \
- sethi %hi(0x1fff40000d0 >> 9), REG; \
- sllx REG, 9, REG; \
- or REG, 0xd0, REG; \
- lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\
- /* sun4v implementation. */ \
- mov SCRATCHPAD_CPUID, REG; \
- ldxa [REG] ASI_SCRATCHPAD, REG; \
- nop; \
- nop; \
- .previous;
-
-#ifdef CONFIG_SMP
-
-#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- __GET_CPUID(TMP) \
- sethi %hi(trap_block), DEST; \
- sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \
- or DEST, %lo(trap_block), DEST; \
- add DEST, TMP, DEST; \
-
-/* Clobbers TMP, current address space PGD phys address into DEST. */
-#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
-
-/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
-#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
-
-/* Clobbers TMP, loads DEST with current thread info pointer. */
-#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
-
-/* Given the current thread info pointer in THR, load the per-cpu
- * area base of the current processor into DEST. REG1, REG2, and REG3 are
- * clobbered.
- *
- * You absolutely cannot use DEST as a temporary in this code. The
- * reason is that traps can happen during execution, and return from
- * trap will load the fully resolved DEST per-cpu base. This can corrupt
- * the calculations done by the macro mid-stream.
- */
-#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
- lduh [THR + TI_CPU], REG1; \
- sethi %hi(__per_cpu_shift), REG3; \
- sethi %hi(__per_cpu_base), REG2; \
- ldx [REG3 + %lo(__per_cpu_shift)], REG3; \
- ldx [REG2 + %lo(__per_cpu_base)], REG2; \
- sllx REG1, REG3, REG3; \
- add REG3, REG2, DEST;
-
-#else
-
-#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- sethi %hi(trap_block), DEST; \
- or DEST, %lo(trap_block), DEST; \
-
-/* Uniprocessor versions, we know the cpuid is zero. */
-#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
-
-/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
-#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
-
-#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
- TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
- ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
-
-/* No per-cpu areas on uniprocessor, so no need to load DEST. */
-#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
-
-#endif /* !(CONFIG_SMP) */
+#include <asm/trap_block.h>
#endif /* _SPARC64_CPUDATA_H */
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 0f4150e26619..204e4bf64438 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -1,8 +1,166 @@
#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H
-#if defined(__sparc__) && defined(__arch64__)
-#include <asm/dma-mapping_64.h>
-#else
-#include <asm/dma-mapping_32.h>
-#endif
+
+#include <linux/scatterlist.h>
+#include <linux/mm.h>
+
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
+
+extern int dma_supported(struct device *dev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_is_consistent(d, h) (1)
+
+struct dma_ops {
+ void *(*alloc_coherent)(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag);
+ void (*free_coherent)(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle);
+ dma_addr_t (*map_page)(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction);
+ void (*unmap_page)(struct device *dev, dma_addr_t dma_addr,
+ size_t size,
+ enum dma_data_direction direction);
+ int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction);
+ void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
+ int nhwentries,
+ enum dma_data_direction direction);
+ void (*sync_single_for_cpu)(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction);
+ void (*sync_single_for_device)(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction);
+ void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
+ int nelems,
+ enum dma_data_direction direction);
+ void (*sync_sg_for_device)(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+};
+extern const struct dma_ops *dma_ops;
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ return dma_ops->map_page(dev, virt_to_page(cpu_addr),
+ (unsigned long)cpu_addr & ~PAGE_MASK, size,
+ direction);
+}
+
+static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ dma_ops->unmap_page(dev, dma_addr, size, direction);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ return dma_ops->map_page(dev, page, offset, size, direction);
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ dma_ops->unmap_page(dev, dma_address, size, direction);
+}
+
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ return dma_ops->map_sg(dev, sg, nents, direction);
+}
+
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
+{
+ dma_ops->unmap_sg(dev, sg, nents, direction);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ if (dma_ops->sync_single_for_device)
+ dma_ops->sync_single_for_device(dev, dma_handle, size,
+ direction);
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ if (dma_ops->sync_sg_for_device)
+ dma_ops->sync_sg_for_device(dev, sg, nelems, direction);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_for_cpu(dev, dma_handle+offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ dma_sync_single_for_device(dev, dma_handle+offset, size, dir);
+}
+
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return (dma_addr == DMA_ERROR_CODE);
+}
+
+static inline int dma_get_cache_alignment(void)
+{
+ /*
+ * no easy way to get cache size on all processors, so return
+ * the maximum possible, to be safe
+ */
+ return (1 << INTERNODE_CACHE_SHIFT);
+}
+
#endif
diff --git a/arch/sparc/include/asm/dma-mapping_32.h b/arch/sparc/include/asm/dma-mapping_32.h
deleted file mode 100644
index 8a57ea0573e6..000000000000
--- a/arch/sparc/include/asm/dma-mapping_32.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _ASM_SPARC_DMA_MAPPING_H
-#define _ASM_SPARC_DMA_MAPPING_H
-
-#include <linux/types.h>
-
-struct device;
-struct scatterlist;
-struct page;
-
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
-extern void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle);
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size,
- enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction);
-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction);
-extern void dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction);
-extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-extern int dma_get_cache_alignment(void);
-
-#define dma_alloc_noncoherent dma_alloc_coherent
-#define dma_free_noncoherent dma_free_coherent
-
-#endif /* _ASM_SPARC_DMA_MAPPING_H */
diff --git a/arch/sparc/include/asm/dma-mapping_64.h b/arch/sparc/include/asm/dma-mapping_64.h
deleted file mode 100644
index bfa64f9702d5..000000000000
--- a/arch/sparc/include/asm/dma-mapping_64.h
+++ /dev/null
@@ -1,154 +0,0 @@
-#ifndef _ASM_SPARC64_DMA_MAPPING_H
-#define _ASM_SPARC64_DMA_MAPPING_H
-
-#include <linux/scatterlist.h>
-#include <linux/mm.h>
-
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-struct dma_ops {
- void *(*alloc_coherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
- void (*free_coherent)(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle);
- dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
- size_t size,
- enum dma_data_direction direction);
- void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction);
- int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
- void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
- int nhwentries,
- enum dma_data_direction direction);
- void (*sync_single_for_cpu)(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction);
- void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
- int nelems,
- enum dma_data_direction direction);
-};
-extern const struct dma_ops *dma_ops;
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size,
- enum dma_data_direction direction)
-{
- return dma_ops->map_single(dev, cpu_addr, size, direction);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_ops->unmap_single(dev, dma_addr, size, direction);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- return dma_ops->map_single(dev, page_address(page) + offset,
- size, direction);
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_ops->unmap_single(dev, dma_address, size, direction);
-}
-
-static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
- return dma_ops->map_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
- dma_ops->unmap_sg(dev, sg, nents, direction);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
- dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
-{
- /* No flushing needed to sync cpu writes to the device. */
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- enum dma_data_direction direction)
-{
- /* No flushing needed to sync cpu writes to the device. */
-}
-
-
-static inline void dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
-}
-
-static inline void dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- /* No flushing needed to sync cpu writes to the device. */
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return (dma_addr == DMA_ERROR_CODE);
-}
-
-static inline int dma_get_cache_alignment(void)
-{
- /* no easy way to get cache size on all processors, so return
- * the maximum possible, to be safe */
- return (1 << INTERNODE_CACHE_SHIFT);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d, h) (1)
-
-#endif /* _ASM_SPARC64_DMA_MAPPING_H */
diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h
index d27716cd38c1..b0f18e9893db 100644
--- a/arch/sparc/include/asm/ftrace.h
+++ b/arch/sparc/include/asm/ftrace.h
@@ -11,4 +11,15 @@ extern void _mcount(void);
#endif
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* relocation of mcount call site is the same as the address */
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
#endif /* _ASM_SPARC64_FTRACE */
diff --git a/arch/sparc/include/asm/kmap_types.h b/arch/sparc/include/asm/kmap_types.h
index 602f5e034f7a..aad21745fbb9 100644
--- a/arch/sparc/include/asm/kmap_types.h
+++ b/arch/sparc/include/asm/kmap_types.h
@@ -5,21 +5,6 @@
* is actually used on sparc. -DaveM
*/
-enum km_type {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_PTE0,
- KM_PTE1,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
#endif
diff --git a/arch/sparc/include/asm/mdesc.h b/arch/sparc/include/asm/mdesc.h
index 1acc7272e537..9faa046713fb 100644
--- a/arch/sparc/include/asm/mdesc.h
+++ b/arch/sparc/include/asm/mdesc.h
@@ -71,7 +71,8 @@ struct mdesc_notifier_client {
extern void mdesc_register_notifier(struct mdesc_notifier_client *client);
-extern void mdesc_fill_in_cpu_data(cpumask_t mask);
+extern void mdesc_fill_in_cpu_data(cpumask_t *mask);
+extern void mdesc_populate_present_mask(cpumask_t *mask);
extern void sun4v_mdesc_init(void);
diff --git a/arch/sparc/include/asm/percpu_64.h b/arch/sparc/include/asm/percpu_64.h
index bee64593023e..007aafb4ae97 100644
--- a/arch/sparc/include/asm/percpu_64.h
+++ b/arch/sparc/include/asm/percpu_64.h
@@ -7,20 +7,16 @@ register unsigned long __local_per_cpu_offset asm("g5");
#ifdef CONFIG_SMP
-extern void real_setup_per_cpu_areas(void);
+#include <asm/trap_block.h>
-extern unsigned long __per_cpu_base;
-extern unsigned long __per_cpu_shift;
#define __per_cpu_offset(__cpu) \
- (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift))
+ (trap_block[(__cpu)].__per_cpu_base)
#define per_cpu_offset(x) (__per_cpu_offset(x))
#define __my_cpu_offset __local_per_cpu_offset
#else /* ! SMP */
-#define real_setup_per_cpu_areas() do { } while (0)
-
#endif /* SMP */
#include <asm-generic/percpu.h>
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index 900d44714f8d..be8d7aaeb60d 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -86,6 +86,8 @@ extern int of_node_to_nid(struct device_node *dp);
#endif
extern void prom_build_devicetree(void);
+extern void of_populate_present_mask(void);
+extern void of_fill_in_cpu_data(void);
/* Dummy ref counting routines - to be implemented later */
static inline struct device_node *of_node_get(struct device_node *node)
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
new file mode 100644
index 000000000000..7e26b2db6211
--- /dev/null
+++ b/arch/sparc/include/asm/trap_block.h
@@ -0,0 +1,207 @@
+#ifndef _SPARC_TRAP_BLOCK_H
+#define _SPARC_TRAP_BLOCK_H
+
+#include <asm/hypervisor.h>
+#include <asm/asi.h>
+
+#ifndef __ASSEMBLY__
+
+/* Trap handling code needs to get at a few critical values upon
+ * trap entry and to process TSB misses. These cannot be in the
+ * per_cpu() area as we really need to lock them into the TLB and
+ * thus make them part of the main kernel image. As a result we
+ * try to make this as small as possible.
+ *
+ * This is padded out and aligned to 64-bytes to avoid false sharing
+ * on SMP.
+ */
+
+/* If you modify the size of this structure, please update
+ * TRAP_BLOCK_SZ_SHIFT below.
+ */
+struct thread_info;
+struct trap_per_cpu {
+/* D-cache line 1: Basic thread information, cpu and device mondo queues */
+ struct thread_info *thread;
+ unsigned long pgd_paddr;
+ unsigned long cpu_mondo_pa;
+ unsigned long dev_mondo_pa;
+
+/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
+ unsigned long resum_mondo_pa;
+ unsigned long resum_kernel_buf_pa;
+ unsigned long nonresum_mondo_pa;
+ unsigned long nonresum_kernel_buf_pa;
+
+/* Dcache lines 3, 4, 5, and 6: Hypervisor Fault Status */
+ struct hv_fault_status fault_info;
+
+/* Dcache line 7: Physical addresses of CPU send mondo block and CPU list. */
+ unsigned long cpu_mondo_block_pa;
+ unsigned long cpu_list_pa;
+ unsigned long tsb_huge;
+ unsigned long tsb_huge_temp;
+
+/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
+ unsigned long irq_worklist_pa;
+ unsigned int cpu_mondo_qmask;
+ unsigned int dev_mondo_qmask;
+ unsigned int resum_qmask;
+ unsigned int nonresum_qmask;
+ unsigned long __per_cpu_base;
+} __attribute__((aligned(64)));
+extern struct trap_per_cpu trap_block[NR_CPUS];
+extern void init_cur_cpu_trap(struct thread_info *);
+extern void setup_tba(void);
+extern int ncpus_probed;
+
+extern unsigned long real_hard_smp_processor_id(void);
+
+struct cpuid_patch_entry {
+ unsigned int addr;
+ unsigned int cheetah_safari[4];
+ unsigned int cheetah_jbus[4];
+ unsigned int starfire[4];
+ unsigned int sun4v[4];
+};
+extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
+
+struct sun4v_1insn_patch_entry {
+ unsigned int addr;
+ unsigned int insn;
+};
+extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
+ __sun4v_1insn_patch_end;
+
+struct sun4v_2insn_patch_entry {
+ unsigned int addr;
+ unsigned int insns[2];
+};
+extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
+ __sun4v_2insn_patch_end;
+
+
+#endif /* !(__ASSEMBLY__) */
+
+#define TRAP_PER_CPU_THREAD 0x00
+#define TRAP_PER_CPU_PGD_PADDR 0x08
+#define TRAP_PER_CPU_CPU_MONDO_PA 0x10
+#define TRAP_PER_CPU_DEV_MONDO_PA 0x18
+#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20
+#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28
+#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30
+#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38
+#define TRAP_PER_CPU_FAULT_INFO 0x40
+#define TRAP_PER_CPU_CPU_MONDO_BLOCK_PA 0xc0
+#define TRAP_PER_CPU_CPU_LIST_PA 0xc8
+#define TRAP_PER_CPU_TSB_HUGE 0xd0
+#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8
+#define TRAP_PER_CPU_IRQ_WORKLIST_PA 0xe0
+#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe8
+#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xec
+#define TRAP_PER_CPU_RESUM_QMASK 0xf0
+#define TRAP_PER_CPU_NONRESUM_QMASK 0xf4
+#define TRAP_PER_CPU_PER_CPU_BASE 0xf8
+
+#define TRAP_BLOCK_SZ_SHIFT 8
+
+#include <asm/scratchpad.h>
+
+#define __GET_CPUID(REG) \
+ /* Spitfire implementation (default). */ \
+661: ldxa [%g0] ASI_UPA_CONFIG, REG; \
+ srlx REG, 17, REG; \
+ and REG, 0x1f, REG; \
+ nop; \
+ .section .cpuid_patch, "ax"; \
+ /* Instruction location. */ \
+ .word 661b; \
+ /* Cheetah Safari implementation. */ \
+ ldxa [%g0] ASI_SAFARI_CONFIG, REG; \
+ srlx REG, 17, REG; \
+ and REG, 0x3ff, REG; \
+ nop; \
+ /* Cheetah JBUS implementation. */ \
+ ldxa [%g0] ASI_JBUS_CONFIG, REG; \
+ srlx REG, 17, REG; \
+ and REG, 0x1f, REG; \
+ nop; \
+ /* Starfire implementation. */ \
+ sethi %hi(0x1fff40000d0 >> 9), REG; \
+ sllx REG, 9, REG; \
+ or REG, 0xd0, REG; \
+ lduwa [REG] ASI_PHYS_BYPASS_EC_E, REG;\
+ /* sun4v implementation. */ \
+ mov SCRATCHPAD_CPUID, REG; \
+ ldxa [REG] ASI_SCRATCHPAD, REG; \
+ nop; \
+ nop; \
+ .previous;
+
+#ifdef CONFIG_SMP
+
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ __GET_CPUID(TMP) \
+ sethi %hi(trap_block), DEST; \
+ sllx TMP, TRAP_BLOCK_SZ_SHIFT, TMP; \
+ or DEST, %lo(trap_block), DEST; \
+ add DEST, TMP, DEST; \
+
+/* Clobbers TMP, current address space PGD phys address into DEST. */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
+
+/* Clobbers TMP, loads DEST with current thread info pointer. */
+#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
+
+/* Given the current thread info pointer in THR, load the per-cpu
+ * area base of the current processor into DEST. REG1, REG2, and REG3 are
+ * clobbered.
+ *
+ * You absolutely cannot use DEST as a temporary in this code. The
+ * reason is that traps can happen during execution, and return from
+ * trap will load the fully resolved DEST per-cpu base. This can corrupt
+ * the calculations done by the macro mid-stream.
+ */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
+ lduh [THR + TI_CPU], REG1; \
+ sethi %hi(trap_block), REG2; \
+ sllx REG1, TRAP_BLOCK_SZ_SHIFT, REG1; \
+ or REG2, %lo(trap_block), REG2; \
+ add REG2, REG1, REG2; \
+ ldx [REG2 + TRAP_PER_CPU_PER_CPU_BASE], DEST;
+
+#else
+
+#define TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ sethi %hi(trap_block), DEST; \
+ or DEST, %lo(trap_block), DEST; \
+
+/* Uniprocessor versions; we know the cpuid is zero. */
+#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ ldx [DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
+
+/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
+#define TRAP_LOAD_IRQ_WORK_PA(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ add DEST, TRAP_PER_CPU_IRQ_WORKLIST_PA, DEST;
+
+#define TRAP_LOAD_THREAD_REG(DEST, TMP) \
+ TRAP_LOAD_TRAP_BLOCK(DEST, TMP) \
+ ldx [DEST + TRAP_PER_CPU_THREAD], DEST;
+
+/* No per-cpu areas on uniprocessor, so no need to load DEST. */
+#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3)
+
+#endif /* !(CONFIG_SMP) */
+
+#endif /* _SPARC_TRAP_BLOCK_H */
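
Editorial aside, not part of the patch: the TRAP_PER_CPU_* constants above are hand-maintained byte offsets into struct trap_per_cpu for use from assembly, and the block must stay a power of two (TRAP_BLOCK_SZ_SHIFT = 8) so a cpuid can be turned into an index with a shift. The kernel has its own consistency checks elsewhere; the standalone user-space sketch below only illustrates the layout arithmetic, using a hypothetical stand-in struct whose field names mirror the macros.

#include <assert.h>
#include <stddef.h>

/* Hypothetical stand-in for struct trap_per_cpu; fault_info is modelled
 * as 0x80 opaque bytes so the later fields land on the macro offsets.
 */
struct toy_trap_per_cpu {
    unsigned long thread;                   /* 0x00 */
    unsigned long pgd_paddr;                /* 0x08 */
    unsigned long cpu_mondo_pa;             /* 0x10 */
    unsigned long dev_mondo_pa;             /* 0x18 */
    unsigned long resum_mondo_pa;           /* 0x20 */
    unsigned long resum_kernel_buf_pa;      /* 0x28 */
    unsigned long nonresum_mondo_pa;        /* 0x30 */
    unsigned long nonresum_kernel_buf_pa;   /* 0x38 */
    unsigned char fault_info[0x80];         /* 0x40 */
    unsigned long cpu_mondo_block_pa;       /* 0xc0 */
    unsigned long cpu_list_pa;              /* 0xc8 */
    unsigned long tsb_huge;                 /* 0xd0 */
    unsigned long tsb_huge_temp;            /* 0xd8 */
    unsigned long irq_worklist_pa;          /* 0xe0 */
    unsigned int cpu_mondo_qmask;           /* 0xe8 */
    unsigned int dev_mondo_qmask;           /* 0xec */
    unsigned int resum_qmask;               /* 0xf0 */
    unsigned int nonresum_qmask;            /* 0xf4 */
    unsigned long __per_cpu_base;           /* 0xf8 */
} __attribute__((aligned(64)));

int main(void)
{
    /* On an LP64 target these mirror the assembly offsets above. */
    assert(offsetof(struct toy_trap_per_cpu, cpu_mondo_block_pa) == 0xc0);
    assert(offsetof(struct toy_trap_per_cpu, irq_worklist_pa) == 0xe0);
    assert(offsetof(struct toy_trap_per_cpu, __per_cpu_base) == 0xf8);
    /* ...and the whole block stays 1 << TRAP_BLOCK_SZ_SHIFT bytes. */
    assert(sizeof(struct toy_trap_per_cpu) == (1UL << 8));
    return 0;
}
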
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index b8eb71ef3163..b2c406de7d4f 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -394,8 +394,9 @@
#define __NR_accept4 323
#define __NR_preadv 324
#define __NR_pwritev 325
+#define __NR_rt_tgsigqueueinfo 326
-#define NR_SYSCALLS 326
+#define NR_SYSCALLS 327
#ifdef __32bit_syscall_numbers__
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
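
Editorial aside: the unistd.h hunk above keeps the usual invariant that NR_SYSCALLS is one past the highest assigned syscall number. A hypothetical C11 compile-time check of that invariant (not something the sparc tree actually carries) would look like the sketch below.

#include <assert.h>

#define __NR_rt_tgsigqueueinfo  326
#define NR_SYSCALLS             327

/* Breaks the build if an entry is added without bumping NR_SYSCALLS. */
static_assert(NR_SYSCALLS == __NR_rt_tgsigqueueinfo + 1,
              "NR_SYSCALLS must be one past the highest syscall number");

int main(void)
{
    return 0;
}
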
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 54742e58831c..475ce4696acd 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -37,6 +37,7 @@ obj-y += una_asm_$(BITS).o
obj-$(CONFIG_SPARC32) += muldiv.o
obj-y += prom_common.o
obj-y += prom_$(BITS).o
+obj-y += of_device_common.o
obj-y += of_device_$(BITS).o
obj-$(CONFIG_SPARC64) += prom_irqtrans.o
@@ -54,6 +55,7 @@ obj-$(CONFIG_SPARC64) += sstate.o
obj-$(CONFIG_SPARC64) += mdesc.o
obj-$(CONFIG_SPARC64) += pcr.o
obj-$(CONFIG_SPARC64) += nmi.o
+obj-$(CONFIG_SPARC64_SMP) += cpumap.o
# sparc32 do not use GENERIC_HARDIRQS but uses the generic devres implementation
obj-$(CONFIG_SPARC32) += devres.o
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
new file mode 100644
index 000000000000..7430ed080b23
--- /dev/null
+++ b/arch/sparc/kernel/cpumap.c
@@ -0,0 +1,431 @@
+/* cpumap.c: used for optimizing CPU assignment
+ *
+ * Copyright (C) 2009 Hong H. Pham <hong.pham@windriver.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cpumask.h>
+#include <linux/spinlock.h>
+#include <asm/cpudata.h>
+#include "cpumap.h"
+
+
+enum {
+ CPUINFO_LVL_ROOT = 0,
+ CPUINFO_LVL_NODE,
+ CPUINFO_LVL_CORE,
+ CPUINFO_LVL_PROC,
+ CPUINFO_LVL_MAX,
+};
+
+enum {
+ ROVER_NO_OP = 0,
+ /* Increment rover every time level is visited */
+ ROVER_INC_ON_VISIT = 1 << 0,
+ /* Increment parent's rover every time rover wraps around */
+ ROVER_INC_PARENT_ON_LOOP = 1 << 1,
+};
+
+struct cpuinfo_node {
+ int id;
+ int level;
+ int num_cpus; /* Number of CPUs in this hierarchy */
+ int parent_index;
+ int child_start; /* Array index of the first child node */
+ int child_end; /* Array index of the last child node */
+ int rover; /* Child node iterator */
+};
+
+struct cpuinfo_level {
+ int start_index; /* Index of first node of a level in a cpuinfo tree */
+ int end_index; /* Index of last node of a level in a cpuinfo tree */
+ int num_nodes; /* Number of nodes in a level in a cpuinfo tree */
+};
+
+struct cpuinfo_tree {
+ int total_nodes;
+
+ /* Offsets into nodes[] for each level of the tree */
+ struct cpuinfo_level level[CPUINFO_LVL_MAX];
+ struct cpuinfo_node nodes[0];
+};
+
+
+static struct cpuinfo_tree *cpuinfo_tree;
+
+static u16 cpu_distribution_map[NR_CPUS];
+static DEFINE_SPINLOCK(cpu_map_lock);
+
+
+/* Niagara optimized cpuinfo tree traversal. */
+static const int niagara_iterate_method[] = {
+ [CPUINFO_LVL_ROOT] = ROVER_NO_OP,
+
+ /* Strands (or virtual CPUs) within a core may not run concurrently
+ * on the Niagara, as instruction pipeline(s) are shared. Distribute
+ * work to strands in different cores first for better concurrency.
+ * Go to next NUMA node when all cores are used.
+ */
+ [CPUINFO_LVL_NODE] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP,
+
+ /* Strands are grouped together by proc_id in cpuinfo_sparc, i.e.
+ * a proc_id represents an instruction pipeline. Distribute work to
+ * strands in different proc_id groups if the core has multiple
+ * instruction pipelines (e.g. the Niagara 2/2+ has two).
+ */
+ [CPUINFO_LVL_CORE] = ROVER_INC_ON_VISIT,
+
+ /* Pick the next strand in the proc_id group. */
+ [CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT,
+};
+
+/* Generic cpuinfo tree traversal. Distribute work round robin across NUMA
+ * nodes.
+ */
+static const int generic_iterate_method[] = {
+ [CPUINFO_LVL_ROOT] = ROVER_INC_ON_VISIT,
+ [CPUINFO_LVL_NODE] = ROVER_NO_OP,
+ [CPUINFO_LVL_CORE] = ROVER_INC_PARENT_ON_LOOP,
+ [CPUINFO_LVL_PROC] = ROVER_INC_ON_VISIT|ROVER_INC_PARENT_ON_LOOP,
+};
+
+
+static int cpuinfo_id(int cpu, int level)
+{
+ int id;
+
+ switch (level) {
+ case CPUINFO_LVL_ROOT:
+ id = 0;
+ break;
+ case CPUINFO_LVL_NODE:
+ id = cpu_to_node(cpu);
+ break;
+ case CPUINFO_LVL_CORE:
+ id = cpu_data(cpu).core_id;
+ break;
+ case CPUINFO_LVL_PROC:
+ id = cpu_data(cpu).proc_id;
+ break;
+ default:
+ id = -EINVAL;
+ }
+ return id;
+}
+
+/*
+ * Enumerate the CPU information in __cpu_data to determine the start index,
+ * end index, and number of nodes for each level in the cpuinfo tree. The
+ * total number of cpuinfo nodes required to build the tree is returned.
+ */
+static int enumerate_cpuinfo_nodes(struct cpuinfo_level *tree_level)
+{
+ int prev_id[CPUINFO_LVL_MAX];
+ int i, n, num_nodes;
+
+ for (i = CPUINFO_LVL_ROOT; i < CPUINFO_LVL_MAX; i++) {
+ struct cpuinfo_level *lv = &tree_level[i];
+
+ prev_id[i] = -1;
+ lv->start_index = lv->end_index = lv->num_nodes = 0;
+ }
+
+ num_nodes = 1; /* Include the root node */
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (!cpu_online(i))
+ continue;
+
+ n = cpuinfo_id(i, CPUINFO_LVL_NODE);
+ if (n > prev_id[CPUINFO_LVL_NODE]) {
+ tree_level[CPUINFO_LVL_NODE].num_nodes++;
+ prev_id[CPUINFO_LVL_NODE] = n;
+ num_nodes++;
+ }
+ n = cpuinfo_id(i, CPUINFO_LVL_CORE);
+ if (n > prev_id[CPUINFO_LVL_CORE]) {
+ tree_level[CPUINFO_LVL_CORE].num_nodes++;
+ prev_id[CPUINFO_LVL_CORE] = n;
+ num_nodes++;
+ }
+ n = cpuinfo_id(i, CPUINFO_LVL_PROC);
+ if (n > prev_id[CPUINFO_LVL_PROC]) {
+ tree_level[CPUINFO_LVL_PROC].num_nodes++;
+ prev_id[CPUINFO_LVL_PROC] = n;
+ num_nodes++;
+ }
+ }
+
+ tree_level[CPUINFO_LVL_ROOT].num_nodes = 1;
+
+ n = tree_level[CPUINFO_LVL_NODE].num_nodes;
+ tree_level[CPUINFO_LVL_NODE].start_index = 1;
+ tree_level[CPUINFO_LVL_NODE].end_index = n;
+
+ n++;
+ tree_level[CPUINFO_LVL_CORE].start_index = n;
+ n += tree_level[CPUINFO_LVL_CORE].num_nodes;
+ tree_level[CPUINFO_LVL_CORE].end_index = n - 1;
+
+ tree_level[CPUINFO_LVL_PROC].start_index = n;
+ n += tree_level[CPUINFO_LVL_PROC].num_nodes;
+ tree_level[CPUINFO_LVL_PROC].end_index = n - 1;
+
+ return num_nodes;
+}
+
+/* Build a tree representation of the CPU hierarchy using the per CPU
+ * information in __cpu_data. Entries in __cpu_data[0..NR_CPUS] are
+ * assumed to be sorted in ascending order based on node, core_id, and
+ * proc_id (in order of significance).
+ */
+static struct cpuinfo_tree *build_cpuinfo_tree(void)
+{
+ struct cpuinfo_tree *new_tree;
+ struct cpuinfo_node *node;
+ struct cpuinfo_level tmp_level[CPUINFO_LVL_MAX];
+ int num_cpus[CPUINFO_LVL_MAX];
+ int level_rover[CPUINFO_LVL_MAX];
+ int prev_id[CPUINFO_LVL_MAX];
+ int n, id, cpu, prev_cpu, last_cpu, level;
+
+ n = enumerate_cpuinfo_nodes(tmp_level);
+
+ new_tree = kzalloc(sizeof(struct cpuinfo_tree) +
+ (sizeof(struct cpuinfo_node) * n), GFP_ATOMIC);
+ if (!new_tree)
+ return NULL;
+
+ new_tree->total_nodes = n;
+ memcpy(&new_tree->level, tmp_level, sizeof(tmp_level));
+
+ prev_cpu = cpu = first_cpu(cpu_online_map);
+
+ /* Initialize all levels in the tree with the first CPU */
+ for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) {
+ n = new_tree->level[level].start_index;
+
+ level_rover[level] = n;
+ node = &new_tree->nodes[n];
+
+ id = cpuinfo_id(cpu, level);
+ if (unlikely(id < 0)) {
+ kfree(new_tree);
+ return NULL;
+ }
+ node->id = id;
+ node->level = level;
+ node->num_cpus = 1;
+
+ node->parent_index = (level > CPUINFO_LVL_ROOT)
+ ? new_tree->level[level - 1].start_index : -1;
+
+ node->child_start = node->child_end = node->rover =
+ (level == CPUINFO_LVL_PROC)
+ ? cpu : new_tree->level[level + 1].start_index;
+
+ prev_id[level] = node->id;
+ num_cpus[level] = 1;
+ }
+
+ for (last_cpu = (num_possible_cpus() - 1); last_cpu >= 0; last_cpu--) {
+ if (cpu_online(last_cpu))
+ break;
+ }
+
+ while (++cpu <= last_cpu) {
+ if (!cpu_online(cpu))
+ continue;
+
+ for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT;
+ level--) {
+ id = cpuinfo_id(cpu, level);
+ if (unlikely(id < 0)) {
+ kfree(new_tree);
+ return NULL;
+ }
+
+ if ((id != prev_id[level]) || (cpu == last_cpu)) {
+ prev_id[level] = id;
+ node = &new_tree->nodes[level_rover[level]];
+ node->num_cpus = num_cpus[level];
+ num_cpus[level] = 1;
+
+ if (cpu == last_cpu)
+ node->num_cpus++;
+
+ /* Connect tree node to parent */
+ if (level == CPUINFO_LVL_ROOT)
+ node->parent_index = -1;
+ else
+ node->parent_index =
+ level_rover[level - 1];
+
+ if (level == CPUINFO_LVL_PROC) {
+ node->child_end =
+ (cpu == last_cpu) ? cpu : prev_cpu;
+ } else {
+ node->child_end =
+ level_rover[level + 1] - 1;
+ }
+
+ /* Initialize the next node in the same level */
+ n = ++level_rover[level];
+ if (n <= new_tree->level[level].end_index) {
+ node = &new_tree->nodes[n];
+ node->id = id;
+ node->level = level;
+
+ /* Connect node to child */
+ node->child_start = node->child_end =
+ node->rover =
+ (level == CPUINFO_LVL_PROC)
+ ? cpu : level_rover[level + 1];
+ }
+ } else
+ num_cpus[level]++;
+ }
+ prev_cpu = cpu;
+ }
+
+ return new_tree;
+}
+
+static void increment_rover(struct cpuinfo_tree *t, int node_index,
+ int root_index, const int *rover_inc_table)
+{
+ struct cpuinfo_node *node = &t->nodes[node_index];
+ int top_level, level;
+
+ top_level = t->nodes[root_index].level;
+ for (level = node->level; level >= top_level; level--) {
+ node->rover++;
+ if (node->rover <= node->child_end)
+ return;
+
+ node->rover = node->child_start;
+ /* If parent's rover does not need to be adjusted, stop here. */
+ if ((level == top_level) ||
+ !(rover_inc_table[level] & ROVER_INC_PARENT_ON_LOOP))
+ return;
+
+ node = &t->nodes[node->parent_index];
+ }
+}
+
+static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
+{
+ const int *rover_inc_table;
+ int level, new_index, index = root_index;
+
+ switch (sun4v_chip_type) {
+ case SUN4V_CHIP_NIAGARA1:
+ case SUN4V_CHIP_NIAGARA2:
+ rover_inc_table = niagara_iterate_method;
+ break;
+ default:
+ rover_inc_table = generic_iterate_method;
+ }
+
+ for (level = t->nodes[root_index].level; level < CPUINFO_LVL_MAX;
+ level++) {
+ new_index = t->nodes[index].rover;
+ if (rover_inc_table[level] & ROVER_INC_ON_VISIT)
+ increment_rover(t, index, root_index, rover_inc_table);
+
+ index = new_index;
+ }
+ return index;
+}
+
+static void _cpu_map_rebuild(void)
+{
+ int i;
+
+ if (cpuinfo_tree) {
+ kfree(cpuinfo_tree);
+ cpuinfo_tree = NULL;
+ }
+
+ cpuinfo_tree = build_cpuinfo_tree();
+ if (!cpuinfo_tree)
+ return;
+
+ /* Build CPU distribution map that spans all online CPUs. No need
+ * to check if the CPU is online, as that is done when the cpuinfo
+ * tree is being built.
+ */
+ for (i = 0; i < cpuinfo_tree->nodes[0].num_cpus; i++)
+ cpu_distribution_map[i] = iterate_cpu(cpuinfo_tree, 0);
+}
+
+/* Fallback if the cpuinfo tree could not be built. CPU mapping is linear
+ * round robin.
+ */
+static int simple_map_to_cpu(unsigned int index)
+{
+ int i, end, cpu_rover;
+
+ cpu_rover = 0;
+ end = index % num_online_cpus();
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (cpu_online(cpu_rover)) {
+ if (cpu_rover >= end)
+ return cpu_rover;
+
+ cpu_rover++;
+ }
+ }
+
+ /* Impossible, since num_online_cpus() <= num_possible_cpus() */
+ return first_cpu(cpu_online_map);
+}
+
+static int _map_to_cpu(unsigned int index)
+{
+ struct cpuinfo_node *root_node;
+
+ if (unlikely(!cpuinfo_tree)) {
+ _cpu_map_rebuild();
+ if (!cpuinfo_tree)
+ return simple_map_to_cpu(index);
+ }
+
+ root_node = &cpuinfo_tree->nodes[0];
+#ifdef CONFIG_HOTPLUG_CPU
+ if (unlikely(root_node->num_cpus != num_online_cpus())) {
+ _cpu_map_rebuild();
+ if (!cpuinfo_tree)
+ return simple_map_to_cpu(index);
+ }
+#endif
+ return cpu_distribution_map[index % root_node->num_cpus];
+}
+
+int map_to_cpu(unsigned int index)
+{
+ int mapped_cpu;
+ unsigned long flag;
+
+ spin_lock_irqsave(&cpu_map_lock, flag);
+ mapped_cpu = _map_to_cpu(index);
+
+#ifdef CONFIG_HOTPLUG_CPU
+ while (unlikely(!cpu_online(mapped_cpu)))
+ mapped_cpu = _map_to_cpu(index);
+#endif
+ spin_unlock_irqrestore(&cpu_map_lock, flag);
+ return mapped_cpu;
+}
+EXPORT_SYMBOL(map_to_cpu);
+
+void cpu_map_rebuild(void)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&cpu_map_lock, flag);
+ _cpu_map_rebuild();
+ spin_unlock_irqrestore(&cpu_map_lock, flag);
+}
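
Editorial aside, not part of the patch: a tiny user-space model of the Niagara policy encoded in niagara_iterate_method above, with one NUMA node, two cores, two strands per core, and the proc level collapsed to one pipeline per core. It is only meant to show the ordering the comments describe, not the exact tree walk: ROVER_INC_ON_VISIT at the node level walks the cores, while each core's own rover only advances when that core is revisited, so consecutive indices land on different cores before any core's second strand is reused.

#include <stdio.h>

#define CORES   2
#define STRANDS 2

int main(void)
{
    int core_rover = 0;
    int strand_rover[CORES] = { 0, 0 };

    for (int i = 0; i < 8; i++) {
        int core = core_rover;
        int strand = strand_rover[core];

        printf("index %d -> core %d, strand %d\n", i, core, strand);

        /* ROVER_INC_ON_VISIT at the node level: the next visit picks the
         * next core; a wrap would bump the parent rover on a machine with
         * more than one NUMA node (ROVER_INC_PARENT_ON_LOOP).
         */
        core_rover = (core_rover + 1) % CORES;

        /* ROVER_INC_ON_VISIT at the strand level: the next visit to this
         * core takes its next strand.
         */
        strand_rover[core] = (strand + 1) % STRANDS;
    }
    return 0;
}
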
diff --git a/arch/sparc/kernel/cpumap.h b/arch/sparc/kernel/cpumap.h
new file mode 100644
index 000000000000..e639880ab864
--- /dev/null
+++ b/arch/sparc/kernel/cpumap.h
@@ -0,0 +1,16 @@
+#ifndef _CPUMAP_H
+#define _CPUMAP_H
+
+#ifdef CONFIG_SMP
+extern void cpu_map_rebuild(void);
+extern int map_to_cpu(unsigned int index);
+#define cpu_map_init() cpu_map_rebuild()
+#else
+#define cpu_map_init() do {} while (0)
+static inline int map_to_cpu(unsigned int index)
+{
+ return raw_smp_processor_id();
+}
+#endif
+
+#endif
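
Editorial aside: the header above keeps callers free of CONFIG_SMP conditionals by giving uniprocessor builds no-op stubs. A user-space mock of the same pattern, with all names and the modulo body purely illustrative rather than kernel API:

#include <stdio.h>

#ifdef CONFIG_SMP
static int map_to_cpu(unsigned int index) { return index % 4; }  /* stand-in */
#define cpu_map_init()  printf("building CPU distribution map\n")
#else
#define cpu_map_init()  do { } while (0)
static inline int map_to_cpu(unsigned int index) { return 0; }   /* "this" cpu */
#endif

int main(void)
{
    cpu_map_init();
    printf("index 7 -> cpu %d\n", map_to_cpu(7));
    return 0;
}
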
diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c
index ebc8403b035e..524c32f97c55 100644
--- a/arch/sparc/kernel/dma.c
+++ b/arch/sparc/kernel/dma.c
@@ -35,8 +35,8 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
}
EXPORT_SYMBOL(dma_set_mask);
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+static void *dma32_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type)
@@ -44,10 +44,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
#endif
return sbus_alloc_consistent(dev, size, dma_handle);
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+static void dma32_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -58,38 +57,10 @@ void dma_free_coherent(struct device *dev, size_t size,
#endif
sbus_free_consistent(dev, size, cpu_addr, dma_handle);
}
-EXPORT_SYMBOL(dma_free_coherent);
-dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size, enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
- return pci_map_single(to_pci_dev(dev), cpu_addr,
- size, (int)direction);
-#endif
- return sbus_map_single(dev, cpu_addr, size, (int)direction);
-}
-EXPORT_SYMBOL(dma_map_single);
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size,
- enum dma_data_direction direction)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type) {
- pci_unmap_single(to_pci_dev(dev), dma_addr,
- size, (int)direction);
- return;
- }
-#endif
- sbus_unmap_single(dev, dma_addr, size, (int)direction);
-}
-EXPORT_SYMBOL(dma_unmap_single);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static dma_addr_t dma32_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type)
@@ -99,10 +70,9 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
return sbus_map_single(dev, page_address(page) + offset,
size, (int)direction);
}
-EXPORT_SYMBOL(dma_map_page);
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction)
+static void dma32_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -113,10 +83,9 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
#endif
sbus_unmap_single(dev, dma_address, size, (int)direction);
}
-EXPORT_SYMBOL(dma_unmap_page);
-int dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
+static int dma32_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type)
@@ -124,10 +93,9 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg,
#endif
return sbus_map_sg(dev, sg, nents, direction);
}
-EXPORT_SYMBOL(dma_map_sg);
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
+void dma32_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -137,10 +105,10 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
#endif
sbus_unmap_sg(dev, sg, nents, (int)direction);
}
-EXPORT_SYMBOL(dma_unmap_sg);
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
+static void dma32_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -151,10 +119,10 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
#endif
sbus_dma_sync_single_for_cpu(dev, dma_handle, size, (int) direction);
}
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
+static void dma32_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -165,28 +133,9 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
#endif
sbus_dma_sync_single_for_device(dev, dma_handle, size, (int) direction);
}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset,
- size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
-}
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
-}
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction)
+static void dma32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -197,11 +146,10 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
#endif
BUG();
}
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-void dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
+static void dma32_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type) {
@@ -212,16 +160,19 @@ void dma_sync_sg_for_device(struct device *dev,
#endif
BUG();
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return (dma_addr == DMA_ERROR_CODE);
-}
-EXPORT_SYMBOL(dma_mapping_error);
-
-int dma_get_cache_alignment(void)
-{
- return 32;
-}
-EXPORT_SYMBOL(dma_get_cache_alignment);
+static const struct dma_ops dma32_dma_ops = {
+ .alloc_coherent = dma32_alloc_coherent,
+ .free_coherent = dma32_free_coherent,
+ .map_page = dma32_map_page,
+ .unmap_page = dma32_unmap_page,
+ .map_sg = dma32_map_sg,
+ .unmap_sg = dma32_unmap_sg,
+ .sync_single_for_cpu = dma32_sync_single_for_cpu,
+ .sync_single_for_device = dma32_sync_single_for_device,
+ .sync_sg_for_cpu = dma32_sync_sg_for_cpu,
+ .sync_sg_for_device = dma32_sync_sg_for_device,
+};
+
+const struct dma_ops *dma_ops = &dma32_dma_ops;
+EXPORT_SYMBOL(dma_ops);
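
Editorial aside: the point of the dma.c hunk is the shift from one exported function per operation to a table of operations selected once at boot, so the 32-bit SBUS/PCI path and the sun4u/sun4v IOMMU paths can share the same entry points. A user-space toy of that indirection follows; the struct and function names here are made up, not the kernel's struct dma_ops.

#include <stdio.h>

struct toy_dma_ops {
    void (*map_page)(const char *who);
};

static void dma32_map_page_toy(const char *who)
{
    printf("%s mapped via the 32-bit SBUS/PCI path\n", who);
}

static void dma_4v_map_page_toy(const char *who)
{
    printf("%s mapped via the sun4v IOMMU path\n", who);
}

static const struct toy_dma_ops dma32_ops = { .map_page = dma32_map_page_toy };
static const struct toy_dma_ops sun4v_ops = { .map_page = dma_4v_map_page_toy };

/* Chosen once, early, in the real code; reassigned here just to show both. */
static const struct toy_dma_ops *toy_dma_ops = &dma32_ops;

int main(void)
{
    toy_dma_ops->map_page("driver A");
    toy_dma_ops = &sun4v_ops;
    toy_dma_ops->map_page("driver B");
    return 0;
}
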
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 90350f838f05..4a700f4b79ce 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -544,7 +544,8 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp,
resp_len, ncpus, mask,
DR_CPU_STAT_CONFIGURED);
- mdesc_fill_in_cpu_data(*mask);
+ mdesc_populate_present_mask(mask);
+ mdesc_fill_in_cpu_data(mask);
for_each_cpu_mask(cpu, *mask) {
int err;
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index d0218e73f982..d3b1a3076569 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -7,14 +7,10 @@
#include <asm/ftrace.h>
+#ifdef CONFIG_DYNAMIC_FTRACE
static const u32 ftrace_nop = 0x01000000;
-unsigned char *ftrace_nop_replace(void)
-{
- return (char *)&ftrace_nop;
-}
-
-unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static u32 ftrace_call_replace(unsigned long ip, unsigned long addr)
{
static u32 call;
s32 off;
@@ -22,15 +18,11 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
off = ((s32)addr - (s32)ip);
call = 0x40000000 | ((u32)off >> 2);
- return (unsigned char *) &call;
+ return call;
}
-int
-ftrace_modify_code(unsigned long ip, unsigned char *old_code,
- unsigned char *new_code)
+static int ftrace_modify_code(unsigned long ip, u32 old, u32 new)
{
- u32 old = *(u32 *)old_code;
- u32 new = *(u32 *)new_code;
u32 replaced;
int faulted;
@@ -59,18 +51,43 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
return faulted;
}
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long ip = rec->ip;
+ u32 old, new;
+
+ old = ftrace_call_replace(ip, addr);
+ new = ftrace_nop;
+ return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long ip = rec->ip;
+ u32 old, new;
+
+ old = ftrace_nop;
+ new = ftrace_call_replace(ip, addr);
+ return ftrace_modify_code(ip, old, new);
+}
+
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
- unsigned char old[MCOUNT_INSN_SIZE], *new;
+ u32 old, new;
- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
+ old = *(u32 *) &ftrace_call;
new = ftrace_call_replace(ip, (unsigned long)func);
return ftrace_modify_code(ip, old, new);
}
int __init ftrace_dyn_arch_init(void *data)
{
- ftrace_mcount_set(data);
+ unsigned long *p = data;
+
+ *p = 0;
+
return 0;
}
+#endif
+
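
Editorial aside: ftrace_call_replace() above builds a SPARC call instruction, whose low 30 bits hold the branch displacement in words. A quick user-space check of that encoding with made-up addresses; only the formula from the patch is assumed.

#include <stdio.h>
#include <stdint.h>

static uint32_t sparc_call_insn(uint32_t ip, uint32_t target)
{
    int32_t off = (int32_t)target - (int32_t)ip;

    /* op field 01 in the top two bits, word displacement below, matching
     * "call = 0x40000000 | ((u32)off >> 2)" in the patch.
     */
    return 0x40000000u | ((uint32_t)off >> 2);
}

int main(void)
{
    uint32_t ip = 0x00401000, target = 0x00402000;

    printf("call at 0x%08x to 0x%08x -> insn 0x%08x\n",
           ip, target, sparc_call_insn(ip, target));
    return 0;
}
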
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 91bf4c7f79b9..f8f21050448b 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -641,28 +641,6 @@ tlb_fixup_done:
/* Not reached... */
1:
- /* If we boot on a non-zero cpu, all of the per-cpu
- * variable references we make before setting up the
- * per-cpu areas will use a bogus offset. Put a
- * compensating factor into __per_cpu_base to handle
- * this cleanly.
- *
- * What the per-cpu code calculates is:
- *
- * __per_cpu_base + (cpu << __per_cpu_shift)
- *
- * These two variables are zero initially, so to
- * make it all cancel out to zero we need to put
- * "0 - (cpu << 0)" into __per_cpu_base so that the
- * above formula evaluates to zero.
- *
- * We cannot even perform a printk() until this stuff
- * is setup as that calls cpu_clock() which uses
- * per-cpu variables.
- */
- sub %g0, %o0, %o1
- sethi %hi(__per_cpu_base), %o2
- stx %o1, [%o2 + %lo(__per_cpu_base)]
#else
mov 0, %o0
#endif
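
Editorial aside: the block deleted from head_64.S existed, as its own comment explains, to make the early per-cpu offset evaluate to zero on a non-zero boot CPU before the real per-cpu areas were set up. The arithmetic it describes, checked in a trivial user-space sketch with a hypothetical boot CPU number:

#include <assert.h>

int main(void)
{
    unsigned long boot_cpu = 3;          /* hypothetical non-zero boot CPU */
    unsigned long per_cpu_shift = 0;     /* still zero this early in boot */
    unsigned long per_cpu_base = 0UL - (boot_cpu << 0);  /* the removed fixup */

    /* What the per-cpu code calculates, per the removed comment. */
    assert(per_cpu_base + (boot_cpu << per_cpu_shift) == 0);
    return 0;
}
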
diff --git a/arch/sparc/kernel/init_task.c b/arch/sparc/kernel/init_task.c
index f28cb8278e98..28125c5b3d3c 100644
--- a/arch/sparc/kernel/init_task.c
+++ b/arch/sparc/kernel/init_task.c
@@ -10,10 +10,7 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_mm);
EXPORT_SYMBOL(init_task);
/* .text section in head.S is aligned at 8k boundary and this gets linked
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index d8900e1d5aad..0aeaefe696b9 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -351,8 +351,9 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
free_pages((unsigned long)cpu, order);
}
-static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
- enum dma_data_direction direction)
+static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t sz,
+ enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
@@ -368,7 +369,7 @@ static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
if (unlikely(direction == DMA_NONE))
goto bad_no_ctx;
- oaddr = (unsigned long)ptr;
+ oaddr = (unsigned long)(page_address(page) + offset);
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
@@ -472,8 +473,8 @@ do_flush_sync:
vaddr, ctx, npages);
}
-static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
- size_t sz, enum dma_data_direction direction)
+static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
+ size_t sz, enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
@@ -824,8 +825,8 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
static const struct dma_ops sun4u_dma_ops = {
.alloc_coherent = dma_4u_alloc_coherent,
.free_coherent = dma_4u_free_coherent,
- .map_single = dma_4u_map_single,
- .unmap_single = dma_4u_unmap_single,
+ .map_page = dma_4u_map_page,
+ .unmap_page = dma_4u_unmap_page,
.map_sg = dma_4u_map_sg,
.unmap_sg = dma_4u_unmap_sg,
.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
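
Editorial aside: converting the map_single(ptr) handlers to the new map_page(page, offset) form only splits a virtual address into its page and in-page offset; the oaddr computation above reassembles exactly the old pointer. A user-space sketch with a stand-in 8 KB page size (illustrative only, no particular kernel page size is assumed):

#include <assert.h>
#include <stdint.h>

#define TOY_PAGE_SHIFT  13                       /* stand-in page size, 8 KB */
#define TOY_PAGE_SIZE   (1UL << TOY_PAGE_SHIFT)
#define TOY_PAGE_MASK   (~(TOY_PAGE_SIZE - 1))

int main(void)
{
    uintptr_t ptr = 0x12345678;                  /* old map_single() argument */
    uintptr_t page_base = ptr & TOY_PAGE_MASK;   /* page_address(page) */
    uintptr_t offset = ptr & (TOY_PAGE_SIZE - 1);/* offset into that page */

    /* The new handlers recompute oaddr = page_address(page) + offset. */
    assert(page_base + offset == ptr);
    return 0;
}
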
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e5e78f9cfc95..bd075054942b 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -45,6 +45,7 @@
#include <asm/cacheflush.h>
#include "entry.h"
+#include "cpumap.h"
#define NUM_IVECS (IMAP_INR + 1)
@@ -256,35 +257,13 @@ static int irq_choose_cpu(unsigned int virt_irq)
int cpuid;
cpumask_copy(&mask, irq_desc[virt_irq].affinity);
- if (cpus_equal(mask, CPU_MASK_ALL)) {
- static int irq_rover;
- static DEFINE_SPINLOCK(irq_rover_lock);
- unsigned long flags;
-
- /* Round-robin distribution... */
- do_round_robin:
- spin_lock_irqsave(&irq_rover_lock, flags);
-
- while (!cpu_online(irq_rover)) {
- if (++irq_rover >= nr_cpu_ids)
- irq_rover = 0;
- }
- cpuid = irq_rover;
- do {
- if (++irq_rover >= nr_cpu_ids)
- irq_rover = 0;
- } while (!cpu_online(irq_rover));
-
- spin_unlock_irqrestore(&irq_rover_lock, flags);
+ if (cpus_equal(mask, cpu_online_map)) {
+ cpuid = map_to_cpu(virt_irq);
} else {
cpumask_t tmp;
cpus_and(tmp, cpu_online_map, mask);
-
- if (cpus_empty(tmp))
- goto do_round_robin;
-
- cpuid = first_cpu(tmp);
+ cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
}
return cpuid;
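
Editorial aside: the rewritten irq_choose_cpu() has three outcomes: a wide-open affinity mask is spread through the distribution map, an explicit mask takes its first online CPU, and an empty intersection falls back to the distribution map. A user-space toy of that decision; the bitmask helpers and the modulo stand-in for map_to_cpu() are hypothetical, and the lowest set bit plays the role of first_cpu().

#include <stdio.h>

static unsigned int online_mask = 0x0f;          /* CPUs 0-3 online */

static int map_to_cpu_toy(unsigned int virt_irq)
{
    return virt_irq % 4;                         /* stand-in for map_to_cpu() */
}

static int choose_cpu(unsigned int virt_irq, unsigned int affinity)
{
    unsigned int tmp = affinity & online_mask;

    if (affinity == online_mask)
        return map_to_cpu_toy(virt_irq);
    return tmp ? __builtin_ctz(tmp) : map_to_cpu_toy(virt_irq);
}

int main(void)
{
    printf("irq 5, open mask    -> cpu %d\n", choose_cpu(5, online_mask));
    printf("irq 5, mask {2,3}   -> cpu %d\n", choose_cpu(5, 0x0c));
    printf("irq 5, offline mask -> cpu %d\n", choose_cpu(5, 0x30));
    return 0;
}
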
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index f0e6ed23a468..938da19dc065 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -574,7 +574,7 @@ static void __init report_platform_properties(void)
mdesc_release(hp);
}
-static void __devinit fill_in_one_cache(cpuinfo_sparc *c,
+static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c,
struct mdesc_handle *hp,
u64 mp)
{
@@ -619,8 +619,7 @@ static void __devinit fill_in_one_cache(cpuinfo_sparc *c,
}
}
-static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp,
- int core_id)
+static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
{
u64 a;
@@ -653,7 +652,7 @@ static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp,
}
}
-static void __devinit set_core_ids(struct mdesc_handle *hp)
+static void __cpuinit set_core_ids(struct mdesc_handle *hp)
{
int idx;
u64 mp;
@@ -678,8 +677,7 @@ static void __devinit set_core_ids(struct mdesc_handle *hp)
}
}
-static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp,
- int proc_id)
+static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
{
u64 a;
@@ -698,8 +696,7 @@ static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp,
}
}
-static void __devinit __set_proc_ids(struct mdesc_handle *hp,
- const char *exec_unit_name)
+static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
{
int idx;
u64 mp;
@@ -720,13 +717,13 @@ static void __devinit __set_proc_ids(struct mdesc_handle *hp,
}
}
-static void __devinit set_proc_ids(struct mdesc_handle *hp)
+static void __cpuinit set_proc_ids(struct mdesc_handle *hp)
{
__set_proc_ids(hp, "exec_unit");
__set_proc_ids(hp, "exec-unit");
}
-static void __devinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
+static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
unsigned char def)
{
u64 val;
@@ -745,7 +742,7 @@ use_default:
*mask = ((1U << def) * 64U) - 1U;
}
-static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
+static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
struct trap_per_cpu *tb)
{
const u64 *val;
@@ -763,23 +760,15 @@ static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
}
-void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask)
+static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
{
struct mdesc_handle *hp = mdesc_grab();
+ void *ret = NULL;
u64 mp;
- ncpus_probed = 0;
mdesc_for_each_node_by_name(hp, mp, "cpu") {
const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
- const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
- struct trap_per_cpu *tb;
- cpuinfo_sparc *c;
- int cpuid;
- u64 a;
-
- ncpus_probed++;
-
- cpuid = *id;
+ int cpuid = *id;
#ifdef CONFIG_SMP
if (cpuid >= NR_CPUS) {
@@ -788,62 +777,104 @@ void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask)
cpuid, NR_CPUS);
continue;
}
- if (!cpu_isset(cpuid, mask))
+ if (!cpu_isset(cpuid, *mask))
continue;
-#else
- /* On uniprocessor we only want the values for the
- * real physical cpu the kernel booted onto, however
- * cpu_data() only has one entry at index 0.
- */
- if (cpuid != real_hard_smp_processor_id())
- continue;
- cpuid = 0;
#endif
- c = &cpu_data(cpuid);
- c->clock_tick = *cfreq;
+ ret = func(hp, mp, cpuid, arg);
+ if (ret)
+ goto out;
+ }
+out:
+ mdesc_release(hp);
+ return ret;
+}
- tb = &trap_block[cpuid];
- get_mondo_data(hp, mp, tb);
+static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
+{
+ ncpus_probed++;
+#ifdef CONFIG_SMP
+ set_cpu_present(cpuid, true);
+#endif
+ return NULL;
+}
- mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
- u64 j, t = mdesc_arc_target(hp, a);
- const char *t_name;
+void __cpuinit mdesc_populate_present_mask(cpumask_t *mask)
+{
+ if (tlb_type != hypervisor)
+ return;
- t_name = mdesc_node_name(hp, t);
- if (!strcmp(t_name, "cache")) {
- fill_in_one_cache(c, hp, t);
- continue;
- }
+ ncpus_probed = 0;
+ mdesc_iterate_over_cpus(record_one_cpu, NULL, mask);
+}
- mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
- u64 n = mdesc_arc_target(hp, j);
- const char *n_name;
+static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
+{
+ const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
+ struct trap_per_cpu *tb;
+ cpuinfo_sparc *c;
+ u64 a;
- n_name = mdesc_node_name(hp, n);
- if (!strcmp(n_name, "cache"))
- fill_in_one_cache(c, hp, n);
- }
+#ifndef CONFIG_SMP
+ /* On uniprocessor we only want the values for the
+ * real physical cpu the kernel booted onto, however
+ * cpu_data() only has one entry at index 0.
+ */
+ if (cpuid != real_hard_smp_processor_id())
+ return NULL;
+ cpuid = 0;
+#endif
+
+ c = &cpu_data(cpuid);
+ c->clock_tick = *cfreq;
+
+ tb = &trap_block[cpuid];
+ get_mondo_data(hp, mp, tb);
+
+ mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
+ u64 j, t = mdesc_arc_target(hp, a);
+ const char *t_name;
+
+ t_name = mdesc_node_name(hp, t);
+ if (!strcmp(t_name, "cache")) {
+ fill_in_one_cache(c, hp, t);
+ continue;
}
-#ifdef CONFIG_SMP
- cpu_set(cpuid, cpu_present_map);
-#endif
+ mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
+ u64 n = mdesc_arc_target(hp, j);
+ const char *n_name;
- c->core_id = 0;
- c->proc_id = -1;
+ n_name = mdesc_node_name(hp, n);
+ if (!strcmp(n_name, "cache"))
+ fill_in_one_cache(c, hp, n);
+ }
}
+ c->core_id = 0;
+ c->proc_id = -1;
+
+ return NULL;
+}
+
+void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask)
+{
+ struct mdesc_handle *hp;
+
+ mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);
+
#ifdef CONFIG_SMP
sparc64_multi_core = 1;
#endif
+ hp = mdesc_grab();
+
set_core_ids(hp);
set_proc_ids(hp);
- smp_fill_in_sib_core_maps();
-
mdesc_release(hp);
+
+ smp_fill_in_sib_core_maps();
}
static ssize_t mdesc_read(struct file *file, char __user *buf,
@@ -887,7 +918,6 @@ void __init sun4v_mdesc_init(void)
{
struct mdesc_handle *hp;
unsigned long len, real_len, status;
- cpumask_t mask;
(void) sun4v_mach_desc(0UL, 0UL, &len);
@@ -911,7 +941,4 @@ void __init sun4v_mdesc_init(void)
cur_mdesc = hp;
report_platform_properties();
-
- cpus_setall(mask);
- mdesc_fill_in_cpu_data(mask);
}
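
Editorial aside, not part of the patch: the mdesc.c rework turns one large scan of the machine description into a generic "iterate over cpu nodes, call back per node" helper, so populating the present mask and filling cpu_data become two small callbacks. A user-space toy of the same pattern, with an id array standing in for the MD nodes and all names illustrative:

#include <stdio.h>

typedef void *(*cpu_cb_t)(int cpuid, void *arg);

/* Walks the "cpu nodes" and calls back per node, stopping early if the
 * callback returns non-NULL, as a lookup-style callback would.
 */
static void *iterate_over_cpus(cpu_cb_t func, void *arg, const int *ids, int n)
{
    for (int i = 0; i < n; i++) {
        void *ret = func(ids[i], arg);

        if (ret)
            return ret;
    }
    return NULL;
}

static void *record_one(int cpuid, void *arg)
{
    int *probed = arg;

    (*probed)++;
    printf("cpu %d marked present\n", cpuid);
    return NULL;
}

int main(void)
{
    int ids[] = { 0, 1, 4, 5 };
    int probed = 0;

    iterate_over_cpus(record_one, &probed, ids, 4);
    printf("ncpus_probed = %d\n", probed);
    return 0;
}
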
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index c8f14c1dc521..90396702ea2c 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -6,159 +6,11 @@
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/errno.h>
+#include <linux/irq.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
-static int node_match(struct device *dev, void *data)
-{
- struct of_device *op = to_of_device(dev);
- struct device_node *dp = data;
-
- return (op->node == dp);
-}
-
-struct of_device *of_find_device_by_node(struct device_node *dp)
-{
- struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
- dp, node_match);
-
- if (dev)
- return to_of_device(dev);
-
- return NULL;
-}
-EXPORT_SYMBOL(of_find_device_by_node);
-
-unsigned int irq_of_parse_and_map(struct device_node *node, int index)
-{
- struct of_device *op = of_find_device_by_node(node);
-
- if (!op || index >= op->num_irqs)
- return 0;
-
- return op->irqs[index];
-}
-EXPORT_SYMBOL(irq_of_parse_and_map);
-
-/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
- * BUS and propagate to all child of_device objects.
- */
-void of_propagate_archdata(struct of_device *bus)
-{
- struct dev_archdata *bus_sd = &bus->dev.archdata;
- struct device_node *bus_dp = bus->node;
- struct device_node *dp;
-
- for (dp = bus_dp->child; dp; dp = dp->sibling) {
- struct of_device *op = of_find_device_by_node(dp);
-
- op->dev.archdata.iommu = bus_sd->iommu;
- op->dev.archdata.stc = bus_sd->stc;
- op->dev.archdata.host_controller = bus_sd->host_controller;
- op->dev.archdata.numa_node = bus_sd->numa_node;
-
- if (dp->child)
- of_propagate_archdata(op);
- }
-}
-
-struct bus_type of_platform_bus_type;
-EXPORT_SYMBOL(of_platform_bus_type);
-
-static inline u64 of_read_addr(const u32 *cell, int size)
-{
- u64 r = 0;
- while (size--)
- r = (r << 32) | *(cell++);
- return r;
-}
-
-static void __init get_cells(struct device_node *dp,
- int *addrc, int *sizec)
-{
- if (addrc)
- *addrc = of_n_addr_cells(dp);
- if (sizec)
- *sizec = of_n_size_cells(dp);
-}
-
-/* Max address size we deal with */
-#define OF_MAX_ADDR_CELLS 4
-
-struct of_bus {
- const char *name;
- const char *addr_prop_name;
- int (*match)(struct device_node *parent);
- void (*count_cells)(struct device_node *child,
- int *addrc, int *sizec);
- int (*map)(u32 *addr, const u32 *range,
- int na, int ns, int pna);
- unsigned long (*get_flags)(const u32 *addr, unsigned long);
-};
-
-/*
- * Default translator (generic bus)
- */
-
-static void of_bus_default_count_cells(struct device_node *dev,
- int *addrc, int *sizec)
-{
- get_cells(dev, addrc, sizec);
-}
-
-/* Make sure the least significant 64-bits are in-range. Even
- * for 3 or 4 cell values it is a good enough approximation.
- */
-static int of_out_of_range(const u32 *addr, const u32 *base,
- const u32 *size, int na, int ns)
-{
- u64 a = of_read_addr(addr, na);
- u64 b = of_read_addr(base, na);
-
- if (a < b)
- return 1;
-
- b += of_read_addr(size, ns);
- if (a >= b)
- return 1;
-
- return 0;
-}
-
-static int of_bus_default_map(u32 *addr, const u32 *range,
- int na, int ns, int pna)
-{
- u32 result[OF_MAX_ADDR_CELLS];
- int i;
-
- if (ns > 2) {
- printk("of_device: Cannot handle size cells (%d) > 2.", ns);
- return -EINVAL;
- }
-
- if (of_out_of_range(addr, range, range + na + pna, na, ns))
- return -EINVAL;
-
- /* Start with the parent range base. */
- memcpy(result, range + na, pna * 4);
-
- /* Add in the child address offset. */
- for (i = 0; i < na; i++)
- result[pna - 1 - i] +=
- (addr[na - 1 - i] -
- range[na - 1 - i]);
-
- memcpy(addr, result, pna * 4);
-
- return 0;
-}
-
-static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
-{
- if (flags)
- return flags;
- return IORESOURCE_MEM;
-}
+#include "of_device_common.h"
/*
* PCI bus specific translator
@@ -240,47 +92,6 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
return flags;
}
-/*
- * SBUS bus specific translator
- */
-
-static int of_bus_sbus_match(struct device_node *np)
-{
- struct device_node *dp = np;
-
- while (dp) {
- if (!strcmp(dp->name, "sbus") ||
- !strcmp(dp->name, "sbi"))
- return 1;
-
- /* Have a look at use_1to1_mapping(). We're trying
- * to match SBUS if that's the top-level bus and we
- * don't have some intervening real bus that provides
- * ranges based translations.
- */
- if (of_find_property(dp, "ranges", NULL) != NULL)
- break;
-
- dp = dp->parent;
- }
-
- return 0;
-}
-
-static void of_bus_sbus_count_cells(struct device_node *child,
- int *addrc, int *sizec)
-{
- if (addrc)
- *addrc = 2;
- if (sizec)
- *sizec = 1;
-}
-
-static int of_bus_sbus_map(u32 *addr, const u32 *range, int na, int ns, int pna)
-{
- return of_bus_default_map(addr, range, na, ns, pna);
-}
-
static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
{
return IORESOURCE_MEM;
@@ -307,7 +118,7 @@ static struct of_bus of_busses[] = {
.addr_prop_name = "reg",
.match = of_bus_sbus_match,
.count_cells = of_bus_sbus_count_cells,
- .map = of_bus_sbus_map,
+ .map = of_bus_default_map,
.get_flags = of_bus_sbus_get_flags,
},
/* Default */
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 5ac287ac03de..881947e59e95 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -10,6 +10,8 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include "of_device_common.h"
+
void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name)
{
unsigned long ret = res->start + offset;
@@ -35,156 +37,6 @@ void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
}
EXPORT_SYMBOL(of_iounmap);
-static int node_match(struct device *dev, void *data)
-{
- struct of_device *op = to_of_device(dev);
- struct device_node *dp = data;
-
- return (op->node == dp);
-}
-
-struct of_device *of_find_device_by_node(struct device_node *dp)
-{
- struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
- dp, node_match);
-
- if (dev)
- return to_of_device(dev);
-
- return NULL;
-}
-EXPORT_SYMBOL(of_find_device_by_node);
-
-unsigned int irq_of_parse_and_map(struct device_node *node, int index)
-{
- struct of_device *op = of_find_device_by_node(node);
-
- if (!op || index >= op->num_irqs)
- return 0;
-
- return op->irqs[index];
-}
-EXPORT_SYMBOL(irq_of_parse_and_map);
-
-/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
- * BUS and propagate to all child of_device objects.
- */
-void of_propagate_archdata(struct of_device *bus)
-{
- struct dev_archdata *bus_sd = &bus->dev.archdata;
- struct device_node *bus_dp = bus->node;
- struct device_node *dp;
-
- for (dp = bus_dp->child; dp; dp = dp->sibling) {
- struct of_device *op = of_find_device_by_node(dp);
-
- op->dev.archdata.iommu = bus_sd->iommu;
- op->dev.archdata.stc = bus_sd->stc;
- op->dev.archdata.host_controller = bus_sd->host_controller;
- op->dev.archdata.numa_node = bus_sd->numa_node;
-
- if (dp->child)
- of_propagate_archdata(op);
- }
-}
-
-struct bus_type of_platform_bus_type;
-EXPORT_SYMBOL(of_platform_bus_type);
-
-static inline u64 of_read_addr(const u32 *cell, int size)
-{
- u64 r = 0;
- while (size--)
- r = (r << 32) | *(cell++);
- return r;
-}
-
-static void get_cells(struct device_node *dp, int *addrc, int *sizec)
-{
- if (addrc)
- *addrc = of_n_addr_cells(dp);
- if (sizec)
- *sizec = of_n_size_cells(dp);
-}
-
-/* Max address size we deal with */
-#define OF_MAX_ADDR_CELLS 4
-
-struct of_bus {
- const char *name;
- const char *addr_prop_name;
- int (*match)(struct device_node *parent);
- void (*count_cells)(struct device_node *child,
- int *addrc, int *sizec);
- int (*map)(u32 *addr, const u32 *range,
- int na, int ns, int pna);
- unsigned long (*get_flags)(const u32 *addr, unsigned long);
-};
-
-/*
- * Default translator (generic bus)
- */
-
-static void of_bus_default_count_cells(struct device_node *dev,
- int *addrc, int *sizec)
-{
- get_cells(dev, addrc, sizec);
-}
-
-/* Make sure the least significant 64-bits are in-range. Even
- * for 3 or 4 cell values it is a good enough approximation.
- */
-static int of_out_of_range(const u32 *addr, const u32 *base,
- const u32 *size, int na, int ns)
-{
- u64 a = of_read_addr(addr, na);
- u64 b = of_read_addr(base, na);
-
- if (a < b)
- return 1;
-
- b += of_read_addr(size, ns);
- if (a >= b)
- return 1;
-
- return 0;
-}
-
-static int of_bus_default_map(u32 *addr, const u32 *range,
- int na, int ns, int pna)
-{
- u32 result[OF_MAX_ADDR_CELLS];
- int i;
-
- if (ns > 2) {
- printk("of_device: Cannot handle size cells (%d) > 2.", ns);
- return -EINVAL;
- }
-
- if (of_out_of_range(addr, range, range + na + pna, na, ns))
- return -EINVAL;
-
- /* Start with the parent range base. */
- memcpy(result, range + na, pna * 4);
-
- /* Add in the child address offset. */
- for (i = 0; i < na; i++)
- result[pna - 1 - i] +=
- (addr[na - 1 - i] -
- range[na - 1 - i]);
-
- memcpy(addr, result, pna * 4);
-
- return 0;
-}
-
-static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
-{
- if (flags)
- return flags;
- return IORESOURCE_MEM;
-}
-
/*
* PCI bus specific translator
*/
@@ -295,42 +147,6 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
}
/*
- * SBUS bus specific translator
- */
-
-static int of_bus_sbus_match(struct device_node *np)
-{
- struct device_node *dp = np;
-
- while (dp) {
- if (!strcmp(dp->name, "sbus") ||
- !strcmp(dp->name, "sbi"))
- return 1;
-
- /* Have a look at use_1to1_mapping(). We're trying
- * to match SBUS if that's the top-level bus and we
- * don't have some intervening real bus that provides
- * ranges based translations.
- */
- if (of_find_property(dp, "ranges", NULL) != NULL)
- break;
-
- dp = dp->parent;
- }
-
- return 0;
-}
-
-static void of_bus_sbus_count_cells(struct device_node *child,
- int *addrc, int *sizec)
-{
- if (addrc)
- *addrc = 2;
- if (sizec)
- *sizec = 1;
-}
-
-/*
* FHC/Central bus specific translator.
*
* This is just needed to hard-code the address and size cell
diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c
new file mode 100644
index 000000000000..cb8eb799bb6c
--- /dev/null
+++ b/arch/sparc/kernel/of_device_common.c
@@ -0,0 +1,174 @@
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+#include "of_device_common.h"
+
+static int node_match(struct device *dev, void *data)
+{
+ struct of_device *op = to_of_device(dev);
+ struct device_node *dp = data;
+
+ return (op->node == dp);
+}
+
+struct of_device *of_find_device_by_node(struct device_node *dp)
+{
+ struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
+ dp, node_match);
+
+ if (dev)
+ return to_of_device(dev);
+
+ return NULL;
+}
+EXPORT_SYMBOL(of_find_device_by_node);
+
+unsigned int irq_of_parse_and_map(struct device_node *node, int index)
+{
+ struct of_device *op = of_find_device_by_node(node);
+
+ if (!op || index >= op->num_irqs)
+ return 0;
+
+ return op->irqs[index];
+}
+EXPORT_SYMBOL(irq_of_parse_and_map);
+
+/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
+ * BUS and propagate to all child of_device objects.
+ */
+void of_propagate_archdata(struct of_device *bus)
+{
+ struct dev_archdata *bus_sd = &bus->dev.archdata;
+ struct device_node *bus_dp = bus->node;
+ struct device_node *dp;
+
+ for (dp = bus_dp->child; dp; dp = dp->sibling) {
+ struct of_device *op = of_find_device_by_node(dp);
+
+ op->dev.archdata.iommu = bus_sd->iommu;
+ op->dev.archdata.stc = bus_sd->stc;
+ op->dev.archdata.host_controller = bus_sd->host_controller;
+ op->dev.archdata.numa_node = bus_sd->numa_node;
+
+ if (dp->child)
+ of_propagate_archdata(op);
+ }
+}
+
+struct bus_type of_platform_bus_type;
+EXPORT_SYMBOL(of_platform_bus_type);
+
+static void get_cells(struct device_node *dp, int *addrc, int *sizec)
+{
+ if (addrc)
+ *addrc = of_n_addr_cells(dp);
+ if (sizec)
+ *sizec = of_n_size_cells(dp);
+}
+
+/*
+ * Default translator (generic bus)
+ */
+
+void of_bus_default_count_cells(struct device_node *dev, int *addrc, int *sizec)
+{
+ get_cells(dev, addrc, sizec);
+}
+
+/* Make sure the least significant 64-bits are in-range. Even
+ * for 3 or 4 cell values it is a good enough approximation.
+ */
+int of_out_of_range(const u32 *addr, const u32 *base,
+ const u32 *size, int na, int ns)
+{
+ u64 a = of_read_addr(addr, na);
+ u64 b = of_read_addr(base, na);
+
+ if (a < b)
+ return 1;
+
+ b += of_read_addr(size, ns);
+ if (a >= b)
+ return 1;
+
+ return 0;
+}
+
+int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna)
+{
+ u32 result[OF_MAX_ADDR_CELLS];
+ int i;
+
+ if (ns > 2) {
+ printk("of_device: Cannot handle size cells (%d) > 2.", ns);
+ return -EINVAL;
+ }
+
+ if (of_out_of_range(addr, range, range + na + pna, na, ns))
+ return -EINVAL;
+
+ /* Start with the parent range base. */
+ memcpy(result, range + na, pna * 4);
+
+ /* Add in the child address offset. */
+ for (i = 0; i < na; i++)
+ result[pna - 1 - i] +=
+ (addr[na - 1 - i] -
+ range[na - 1 - i]);
+
+ memcpy(addr, result, pna * 4);
+
+ return 0;
+}
+
+unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
+{
+ if (flags)
+ return flags;
+ return IORESOURCE_MEM;
+}
+
+/*
+ * SBUS bus specific translator
+ */
+
+int of_bus_sbus_match(struct device_node *np)
+{
+ struct device_node *dp = np;
+
+ while (dp) {
+ if (!strcmp(dp->name, "sbus") ||
+ !strcmp(dp->name, "sbi"))
+ return 1;
+
+ /* Have a look at use_1to1_mapping(). We're trying
+ * to match SBUS if that's the top-level bus and we
+ * don't have some intervening real bus that provides
+ * ranges based translations.
+ */
+ if (of_find_property(dp, "ranges", NULL) != NULL)
+ break;
+
+ dp = dp->parent;
+ }
+
+ return 0;
+}
+
+void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec)
+{
+ if (addrc)
+ *addrc = 2;
+ if (sizec)
+ *sizec = 1;
+}
diff --git a/arch/sparc/kernel/of_device_common.h b/arch/sparc/kernel/of_device_common.h
new file mode 100644
index 000000000000..cdfd23992841
--- /dev/null
+++ b/arch/sparc/kernel/of_device_common.h
@@ -0,0 +1,36 @@
+#ifndef _OF_DEVICE_COMMON_H
+#define _OF_DEVICE_COMMON_H
+
+static inline u64 of_read_addr(const u32 *cell, int size)
+{
+ u64 r = 0;
+ while (size--)
+ r = (r << 32) | *(cell++);
+ return r;
+}
+
+void of_bus_default_count_cells(struct device_node *dev, int *addrc,
+ int *sizec);
+int of_out_of_range(const u32 *addr, const u32 *base,
+ const u32 *size, int na, int ns);
+int of_bus_default_map(u32 *addr, const u32 *range, int na, int ns, int pna);
+unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags);
+
+int of_bus_sbus_match(struct device_node *np);
+void of_bus_sbus_count_cells(struct device_node *child, int *addrc, int *sizec);
+
+/* Max address size we deal with */
+#define OF_MAX_ADDR_CELLS 4
+
+struct of_bus {
+ const char *name;
+ const char *addr_prop_name;
+ int (*match)(struct device_node *parent);
+ void (*count_cells)(struct device_node *child,
+ int *addrc, int *sizec);
+ int (*map)(u32 *addr, const u32 *range,
+ int na, int ns, int pna);
+ unsigned long (*get_flags)(const u32 *addr, unsigned long);
+};
+
+#endif /* _OF_DEVICE_COMMON_H */
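
Editorial aside: of_read_addr() in the new common header folds big-endian 32-bit property cells into one 64-bit value, most significant cell first, which is what the default bus translator relies on. A quick user-space check with two made-up cells:

#include <assert.h>
#include <stdint.h>

static uint64_t of_read_addr(const uint32_t *cell, int size)
{
    uint64_t r = 0;

    while (size--)
        r = (r << 32) | *(cell++);
    return r;
}

int main(void)
{
    /* Two cells as they might appear in a "reg" or "ranges" property. */
    uint32_t cells[2] = { 0x0000001f, 0xf4000000 };

    assert(of_read_addr(cells, 2) == 0x0000001ff4000000ULL);
    return 0;
}
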
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 5db5ebed35da..2485eaa23101 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -230,8 +230,9 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
free_pages((unsigned long)cpu, order);
}
-static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
- enum dma_data_direction direction)
+static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t sz,
+ enum dma_data_direction direction)
{
struct iommu *iommu;
unsigned long flags, npages, oaddr;
@@ -245,7 +246,7 @@ static dma_addr_t dma_4v_map_single(struct device *dev, void *ptr, size_t sz,
if (unlikely(direction == DMA_NONE))
goto bad;
- oaddr = (unsigned long)ptr;
+ oaddr = (unsigned long)(page_address(page) + offset);
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
@@ -294,8 +295,8 @@ iommu_map_fail:
return DMA_ERROR_CODE;
}
-static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
- size_t sz, enum dma_data_direction direction)
+static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
+ size_t sz, enum dma_data_direction direction)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
@@ -537,8 +538,8 @@ static void dma_4v_sync_sg_for_cpu(struct device *dev,
static const struct dma_ops sun4v_dma_ops = {
.alloc_coherent = dma_4v_alloc_coherent,
.free_coherent = dma_4v_free_coherent,
- .map_single = dma_4v_map_single,
- .unmap_single = dma_4v_unmap_single,
+ .map_page = dma_4v_map_page,
+ .unmap_page = dma_4v_unmap_page,
.map_sg = dma_4v_map_sg,
.unmap_sg = dma_4v_unmap_sg,
.sync_single_for_cpu = dma_4v_sync_single_for_cpu,
diff --git a/arch/sparc/kernel/prom.h b/arch/sparc/kernel/prom.h
index bb0f0fda6cab..453397fe5e14 100644
--- a/arch/sparc/kernel/prom.h
+++ b/arch/sparc/kernel/prom.h
@@ -22,7 +22,6 @@ static inline int is_root_node(const struct device_node *dp)
extern char *build_path_component(struct device_node *dp);
extern void of_console_init(void);
-extern void of_fill_in_cpu_data(void);
extern unsigned int prom_early_allocated;
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index ca55c7012f77..fb06ac2bd38f 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -374,75 +374,26 @@ static const char *get_mid_prop(void)
return (tlb_type == spitfire ? "upa-portid" : "portid");
}
-struct device_node *of_find_node_by_cpuid(int cpuid)
-{
- struct device_node *dp;
- const char *mid_prop = get_mid_prop();
-
- for_each_node_by_type(dp, "cpu") {
- int id = of_getintprop_default(dp, mid_prop, -1);
- const char *this_mid_prop = mid_prop;
-
- if (id < 0) {
- this_mid_prop = "cpuid";
- id = of_getintprop_default(dp, this_mid_prop, -1);
- }
-
- if (id < 0) {
- prom_printf("OF: Serious problem, cpu lacks "
- "%s property", this_mid_prop);
- prom_halt();
- }
- if (cpuid == id)
- return dp;
- }
- return NULL;
-}
-
-void __init of_fill_in_cpu_data(void)
+static void *of_iterate_over_cpus(void *(*func)(struct device_node *, int, int), int arg)
{
struct device_node *dp;
const char *mid_prop;
- if (tlb_type == hypervisor)
- return;
-
mid_prop = get_mid_prop();
- ncpus_probed = 0;
for_each_node_by_type(dp, "cpu") {
int cpuid = of_getintprop_default(dp, mid_prop, -1);
const char *this_mid_prop = mid_prop;
- struct device_node *portid_parent;
- int portid = -1;
+ void *ret;
- portid_parent = NULL;
if (cpuid < 0) {
this_mid_prop = "cpuid";
cpuid = of_getintprop_default(dp, this_mid_prop, -1);
- if (cpuid >= 0) {
- int limit = 2;
-
- portid_parent = dp;
- while (limit--) {
- portid_parent = portid_parent->parent;
- if (!portid_parent)
- break;
- portid = of_getintprop_default(portid_parent,
- "portid", -1);
- if (portid >= 0)
- break;
- }
- }
}
-
if (cpuid < 0) {
prom_printf("OF: Serious problem, cpu lacks "
"%s property", this_mid_prop);
prom_halt();
}
-
- ncpus_probed++;
-
#ifdef CONFIG_SMP
if (cpuid >= NR_CPUS) {
printk(KERN_WARNING "Ignoring CPU %d which is "
@@ -450,79 +401,142 @@ void __init of_fill_in_cpu_data(void)
cpuid, NR_CPUS);
continue;
}
-#else
- /* On uniprocessor we only want the values for the
- * real physical cpu the kernel booted onto, however
- * cpu_data() only has one entry at index 0.
- */
- if (cpuid != real_hard_smp_processor_id())
- continue;
- cpuid = 0;
#endif
+ ret = func(dp, cpuid, arg);
+ if (ret)
+ return ret;
+ }
+ return NULL;
+}
- cpu_data(cpuid).clock_tick =
- of_getintprop_default(dp, "clock-frequency", 0);
-
- if (portid_parent) {
- cpu_data(cpuid).dcache_size =
- of_getintprop_default(dp, "l1-dcache-size",
- 16 * 1024);
- cpu_data(cpuid).dcache_line_size =
- of_getintprop_default(dp, "l1-dcache-line-size",
- 32);
- cpu_data(cpuid).icache_size =
- of_getintprop_default(dp, "l1-icache-size",
- 8 * 1024);
- cpu_data(cpuid).icache_line_size =
- of_getintprop_default(dp, "l1-icache-line-size",
- 32);
- cpu_data(cpuid).ecache_size =
- of_getintprop_default(dp, "l2-cache-size", 0);
- cpu_data(cpuid).ecache_line_size =
- of_getintprop_default(dp, "l2-cache-line-size", 0);
- if (!cpu_data(cpuid).ecache_size ||
- !cpu_data(cpuid).ecache_line_size) {
- cpu_data(cpuid).ecache_size =
- of_getintprop_default(portid_parent,
- "l2-cache-size",
- (4 * 1024 * 1024));
- cpu_data(cpuid).ecache_line_size =
- of_getintprop_default(portid_parent,
- "l2-cache-line-size", 64);
- }
-
- cpu_data(cpuid).core_id = portid + 1;
- cpu_data(cpuid).proc_id = portid;
+static void *check_cpu_node(struct device_node *dp, int cpuid, int id)
+{
+ if (id == cpuid)
+ return dp;
+ return NULL;
+}
+
+struct device_node *of_find_node_by_cpuid(int cpuid)
+{
+ return of_iterate_over_cpus(check_cpu_node, cpuid);
+}
+
+static void *record_one_cpu(struct device_node *dp, int cpuid, int arg)
+{
+ ncpus_probed++;
#ifdef CONFIG_SMP
- sparc64_multi_core = 1;
+ set_cpu_present(cpuid, true);
+ set_cpu_possible(cpuid, true);
#endif
- } else {
- cpu_data(cpuid).dcache_size =
- of_getintprop_default(dp, "dcache-size", 16 * 1024);
- cpu_data(cpuid).dcache_line_size =
- of_getintprop_default(dp, "dcache-line-size", 32);
+ return NULL;
+}
- cpu_data(cpuid).icache_size =
- of_getintprop_default(dp, "icache-size", 16 * 1024);
- cpu_data(cpuid).icache_line_size =
- of_getintprop_default(dp, "icache-line-size", 32);
+void __init of_populate_present_mask(void)
+{
+ if (tlb_type == hypervisor)
+ return;
+
+ ncpus_probed = 0;
+ of_iterate_over_cpus(record_one_cpu, 0);
+}
+
+static void *fill_in_one_cpu(struct device_node *dp, int cpuid, int arg)
+{
+ struct device_node *portid_parent = NULL;
+ int portid = -1;
+
+ if (of_find_property(dp, "cpuid", NULL)) {
+ int limit = 2;
+
+ portid_parent = dp;
+ while (limit--) {
+ portid_parent = portid_parent->parent;
+ if (!portid_parent)
+ break;
+ portid = of_getintprop_default(portid_parent,
+ "portid", -1);
+ if (portid >= 0)
+ break;
+ }
+ }
+
+#ifndef CONFIG_SMP
+ /* On uniprocessor we only want the values for the
+ * real physical cpu the kernel booted onto, however
+ * cpu_data() only has one entry at index 0.
+ */
+ if (cpuid != real_hard_smp_processor_id())
+ return NULL;
+ cpuid = 0;
+#endif
+
+ cpu_data(cpuid).clock_tick =
+ of_getintprop_default(dp, "clock-frequency", 0);
+
+ if (portid_parent) {
+ cpu_data(cpuid).dcache_size =
+ of_getintprop_default(dp, "l1-dcache-size",
+ 16 * 1024);
+ cpu_data(cpuid).dcache_line_size =
+ of_getintprop_default(dp, "l1-dcache-line-size",
+ 32);
+ cpu_data(cpuid).icache_size =
+ of_getintprop_default(dp, "l1-icache-size",
+ 8 * 1024);
+ cpu_data(cpuid).icache_line_size =
+ of_getintprop_default(dp, "l1-icache-line-size",
+ 32);
+ cpu_data(cpuid).ecache_size =
+ of_getintprop_default(dp, "l2-cache-size", 0);
+ cpu_data(cpuid).ecache_line_size =
+ of_getintprop_default(dp, "l2-cache-line-size", 0);
+ if (!cpu_data(cpuid).ecache_size ||
+ !cpu_data(cpuid).ecache_line_size) {
cpu_data(cpuid).ecache_size =
- of_getintprop_default(dp, "ecache-size",
+ of_getintprop_default(portid_parent,
+ "l2-cache-size",
(4 * 1024 * 1024));
cpu_data(cpuid).ecache_line_size =
- of_getintprop_default(dp, "ecache-line-size", 64);
-
- cpu_data(cpuid).core_id = 0;
- cpu_data(cpuid).proc_id = -1;
+ of_getintprop_default(portid_parent,
+ "l2-cache-line-size", 64);
}
+ cpu_data(cpuid).core_id = portid + 1;
+ cpu_data(cpuid).proc_id = portid;
#ifdef CONFIG_SMP
- set_cpu_present(cpuid, true);
- set_cpu_possible(cpuid, true);
+ sparc64_multi_core = 1;
#endif
+ } else {
+ cpu_data(cpuid).dcache_size =
+ of_getintprop_default(dp, "dcache-size", 16 * 1024);
+ cpu_data(cpuid).dcache_line_size =
+ of_getintprop_default(dp, "dcache-line-size", 32);
+
+ cpu_data(cpuid).icache_size =
+ of_getintprop_default(dp, "icache-size", 16 * 1024);
+ cpu_data(cpuid).icache_line_size =
+ of_getintprop_default(dp, "icache-line-size", 32);
+
+ cpu_data(cpuid).ecache_size =
+ of_getintprop_default(dp, "ecache-size",
+ (4 * 1024 * 1024));
+ cpu_data(cpuid).ecache_line_size =
+ of_getintprop_default(dp, "ecache-line-size", 64);
+
+ cpu_data(cpuid).core_id = 0;
+ cpu_data(cpuid).proc_id = -1;
}
+ return NULL;
+}
+
+void __init of_fill_in_cpu_data(void)
+{
+ if (tlb_type == hypervisor)
+ return;
+
+ of_iterate_over_cpus(fill_in_one_cpu, 0);
+
smp_fill_in_sib_core_maps();
}
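
The prom_64.c hunks above fold two nearly identical device-tree walks into a single of_iterate_over_cpus() helper that hands every "cpu" node to a callback and stops at the first non-NULL return. A minimal user-space sketch of that early-exit callback shape, using purely illustrative names (struct node, walk_nodes, match_id) rather than anything from the patch, looks like this:

#include <stddef.h>
#include <stdio.h>

struct node { int id; struct node *next; };

/* Walk the list, hand every node to 'func', and stop at the first
 * non-NULL return -- the same early-exit shape as of_iterate_over_cpus(). */
static void *walk_nodes(struct node *head,
			void *(*func)(struct node *, int), int arg)
{
	struct node *n;

	for (n = head; n; n = n->next) {
		void *ret = func(n, arg);

		if (ret)
			return ret;
	}
	return NULL;
}

/* Plays the role of check_cpu_node(): match a node by id. */
static void *match_id(struct node *n, int id)
{
	return n->id == id ? (void *)n : NULL;
}

int main(void)
{
	struct node b = { .id = 7, .next = NULL };
	struct node a = { .id = 3, .next = &b };
	struct node *hit = walk_nodes(&a, match_id, 7);

	printf("found id %d\n", hit ? hit->id : -1);
	return 0;
}
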
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index ff7b591c8946..0fb5789d43c8 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -313,6 +313,4 @@ void __init prom_build_devicetree(void)
printk("PROM: Built device tree with %u bytes of memory.\n",
prom_early_allocated);
-
- of_fill_in_cpu_data();
}
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index f7642e5a94db..fa44eaf8d897 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -20,7 +20,8 @@
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
-#include <linux/lmb.h>
+#include <linux/bootmem.h>
+#include <linux/vmalloc.h>
#include <linux/cpu.h>
#include <asm/head.h>
@@ -47,6 +48,8 @@
#include <asm/ldc.h>
#include <asm/hypervisor.h>
+#include "cpumap.h"
+
int sparc64_multi_core __read_mostly;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
@@ -278,7 +281,7 @@ static unsigned long kimage_addr_to_ra(void *p)
return kern_base + (val - KERNBASE);
}
-static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
+static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp)
{
extern unsigned long sparc64_ttable_tl0;
extern unsigned long kern_locked_tte_data;
@@ -298,12 +301,12 @@ static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread
"hvtramp_descr.\n");
return;
}
+ *descrp = hdesc;
hdesc->cpu = cpu;
hdesc->num_mappings = num_kernel_image_mappings;
tb = &trap_block[cpu];
- tb->hdesc = hdesc;
hdesc->fault_info_va = (unsigned long) &tb->fault_info;
hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
@@ -341,12 +344,12 @@ static struct thread_info *cpu_new_thread = NULL;
static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
{
- struct trap_per_cpu *tb = &trap_block[cpu];
unsigned long entry =
(unsigned long)(&sparc64_cpu_startup);
unsigned long cookie =
(unsigned long)(&cpu_new_thread);
struct task_struct *p;
+ void *descr = NULL;
int timeout, ret;
p = fork_idle(cpu);
@@ -359,7 +362,8 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
if (ldom_domaining_enabled)
ldom_startcpu_cpuid(cpu,
- (unsigned long) cpu_new_thread);
+ (unsigned long) cpu_new_thread,
+ &descr);
else
#endif
prom_startcpu_cpuid(cpu, entry, cookie);
@@ -383,10 +387,7 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
}
cpu_new_thread = NULL;
- if (tb->hdesc) {
- kfree(tb->hdesc);
- tb->hdesc = NULL;
- }
+ kfree(descr);
return ret;
}
@@ -1315,6 +1316,8 @@ int __cpu_disable(void)
cpu_clear(cpu, cpu_online_map);
ipi_call_unlock();
+ cpu_map_rebuild();
+
return 0;
}
@@ -1373,36 +1376,171 @@ void smp_send_stop(void)
{
}
-unsigned long __per_cpu_base __read_mostly;
-unsigned long __per_cpu_shift __read_mostly;
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+ unsigned long align)
+{
+ const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+ int node = cpu_to_node(cpu);
+ void *ptr;
+
+ if (!node_online(node) || !NODE_DATA(node)) {
+ ptr = __alloc_bootmem(size, align, goal);
+ pr_info("cpu %d has no node %d or node-local memory\n",
+ cpu, node);
+ pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+ cpu, size, __pa(ptr));
+ } else {
+ ptr = __alloc_bootmem_node(NODE_DATA(node),
+ size, align, goal);
+ pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
+ "%016lx\n", cpu, size, node, __pa(ptr));
+ }
+ return ptr;
+#else
+ return __alloc_bootmem(size, align, goal);
+#endif
+}
-EXPORT_SYMBOL(__per_cpu_base);
-EXPORT_SYMBOL(__per_cpu_shift);
+static size_t pcpur_size __initdata;
+static void **pcpur_ptrs __initdata;
-void __init real_setup_per_cpu_areas(void)
+static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
{
- unsigned long paddr, goal, size, i;
- char *ptr;
+ size_t off = (size_t)pageno << PAGE_SHIFT;
- /* Copy section for each CPU (we discard the original) */
- goal = PERCPU_ENOUGH_ROOM;
+ if (off >= pcpur_size)
+ return NULL;
- __per_cpu_shift = PAGE_SHIFT;
- for (size = PAGE_SIZE; size < goal; size <<= 1UL)
- __per_cpu_shift++;
+ return virt_to_page(pcpur_ptrs[cpu] + off);
+}
+
+#define PCPU_CHUNK_SIZE (4UL * 1024UL * 1024UL)
+
+static void __init pcpu_map_range(unsigned long start, unsigned long end,
+ struct page *page)
+{
+ unsigned long pfn = page_to_pfn(page);
+ unsigned long pte_base;
+
+ BUG_ON((pfn<<PAGE_SHIFT)&(PCPU_CHUNK_SIZE - 1UL));
+
+ pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+ _PAGE_CP_4U | _PAGE_CV_4U |
+ _PAGE_P_4U | _PAGE_W_4U);
+ if (tlb_type == hypervisor)
+ pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
+ _PAGE_CP_4V | _PAGE_CV_4V |
+ _PAGE_P_4V | _PAGE_W_4V);
+
+ while (start < end) {
+ pgd_t *pgd = pgd_offset_k(start);
+ unsigned long this_end;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pud = pud_offset(pgd, start);
+ if (pud_none(*pud)) {
+ pmd_t *new;
+
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ pud_populate(&init_mm, pud, new);
+ }
+
+ pmd = pmd_offset(pud, start);
+ if (!pmd_present(*pmd)) {
+ pte_t *new;
+
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ pmd_populate_kernel(&init_mm, pmd, new);
+ }
- paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
- if (!paddr) {
- prom_printf("Cannot allocate per-cpu memory.\n");
- prom_halt();
+ pte = pte_offset_kernel(pmd, start);
+ this_end = (start + PMD_SIZE) & PMD_MASK;
+ if (this_end > end)
+ this_end = end;
+
+ while (start < this_end) {
+ unsigned long paddr = pfn << PAGE_SHIFT;
+
+ pte_val(*pte) = (paddr | pte_base);
+
+ start += PAGE_SIZE;
+ pte++;
+ pfn++;
+ }
+ }
+}
+
+void __init setup_per_cpu_areas(void)
+{
+ size_t dyn_size, static_size = __per_cpu_end - __per_cpu_start;
+ static struct vm_struct vm;
+ unsigned long delta, cpu;
+ size_t pcpu_unit_size;
+ size_t ptrs_size;
+
+ pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+ PERCPU_DYNAMIC_RESERVE);
+ dyn_size = pcpur_size - static_size - PERCPU_MODULE_RESERVE;
+
+
+ ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
+ pcpur_ptrs = alloc_bootmem(ptrs_size);
+
+ for_each_possible_cpu(cpu) {
+ pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PCPU_CHUNK_SIZE,
+ PCPU_CHUNK_SIZE);
+
+ free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
+ PCPU_CHUNK_SIZE - pcpur_size);
+
+ memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
}
- ptr = __va(paddr);
- __per_cpu_base = ptr - __per_cpu_start;
+ /* allocate address and map */
+ vm.flags = VM_ALLOC;
+ vm.size = num_possible_cpus() * PCPU_CHUNK_SIZE;
+ vm_area_register_early(&vm, PCPU_CHUNK_SIZE);
+
+ for_each_possible_cpu(cpu) {
+ unsigned long start = (unsigned long) vm.addr;
+ unsigned long end;
+
+ start += cpu * PCPU_CHUNK_SIZE;
+ end = start + PCPU_CHUNK_SIZE;
+ pcpu_map_range(start, end, virt_to_page(pcpur_ptrs[cpu]));
+ }
- for (i = 0; i < NR_CPUS; i++, ptr += size)
- memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+ pcpu_unit_size = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+ PERCPU_MODULE_RESERVE, dyn_size,
+ PCPU_CHUNK_SIZE, vm.addr, NULL);
+
+ free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu) {
+ __per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
+ }
/* Setup %g5 for the boot cpu. */
__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
+
+ of_fill_in_cpu_data();
+ if (tlb_type == hypervisor)
+ mdesc_fill_in_cpu_data(cpu_all_mask);
}
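
The new setup_per_cpu_areas() above finishes by recording, for every cpu, an offset of delta + cpu * pcpu_unit_size from the linked per-cpu section. A small stand-alone sketch of that arithmetic, with assumed addresses and the 4 MB chunk size taken from PCPU_CHUNK_SIZE, shows where each cpu's copy of a per-cpu variable would land:

#include <stdio.h>

int main(void)
{
	unsigned long per_cpu_start = 0x1000;	 /* link-time section start (assumed) */
	unsigned long base_addr     = 0x40000;	 /* first chunk's mapped address (assumed) */
	unsigned long unit_size     = 4UL << 20; /* 4 MB per cpu, as in PCPU_CHUNK_SIZE */
	unsigned long delta         = base_addr - per_cpu_start;
	unsigned long var_link_addr = 0x1080;	 /* some per-cpu variable (assumed) */
	unsigned int cpu;

	/* cpu N's copy lives at &var + __per_cpu_offset(N) = &var + delta + N * unit_size */
	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu%u copy at 0x%lx\n", cpu,
		       var_link_addr + delta + cpu * unit_size);
	return 0;
}
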
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 00ec3b15f38c..690901657291 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -81,4 +81,6 @@ sys_call_table:
/*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv, sys_pwritev
+/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
+/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo
+
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 82b5bf85b9d2..6b3ee88e253c 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -82,7 +82,8 @@ sys_call_table32:
.word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv, compat_sys_pwritev
+/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
+ .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo
#endif /* CONFIG_COMPAT */
@@ -156,4 +157,5 @@ sys_call_table:
.word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv, sys_pwritev
+/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
+ .word sys_pwritev, sys_rt_tgsigqueueinfo
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index d809c4ebb48f..10f7bb9fc140 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2509,6 +2509,7 @@ void do_getpsr(struct pt_regs *regs)
}
struct trap_per_cpu trap_block[NR_CPUS];
+EXPORT_SYMBOL(trap_block);
/* This can get invoked before sched_init() so play it super safe
* and use hard_smp_processor_id().
@@ -2530,84 +2531,97 @@ extern void tsb_config_offsets_are_bolixed_dave(void);
void __init trap_init(void)
{
/* Compile time sanity check. */
- if (TI_TASK != offsetof(struct thread_info, task) ||
- TI_FLAGS != offsetof(struct thread_info, flags) ||
- TI_CPU != offsetof(struct thread_info, cpu) ||
- TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
- TI_KSP != offsetof(struct thread_info, ksp) ||
- TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
- TI_KREGS != offsetof(struct thread_info, kregs) ||
- TI_UTRAPS != offsetof(struct thread_info, utraps) ||
- TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
- TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
- TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
- TI_GSR != offsetof(struct thread_info, gsr) ||
- TI_XFSR != offsetof(struct thread_info, xfsr) ||
- TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
- TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
- TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
- TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
- TI_PCR != offsetof(struct thread_info, pcr_reg) ||
- TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
- TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
- TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
- TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
- TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
- TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
- TI_FPREGS != offsetof(struct thread_info, fpregs) ||
- (TI_FPREGS & (64 - 1)))
- thread_info_offsets_are_bolixed_dave();
-
- if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
- (TRAP_PER_CPU_PGD_PADDR !=
- offsetof(struct trap_per_cpu, pgd_paddr)) ||
- (TRAP_PER_CPU_CPU_MONDO_PA !=
- offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
- (TRAP_PER_CPU_DEV_MONDO_PA !=
- offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
- (TRAP_PER_CPU_RESUM_MONDO_PA !=
- offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
- (TRAP_PER_CPU_RESUM_KBUF_PA !=
- offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
- (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
- offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
- (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
- offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
- (TRAP_PER_CPU_FAULT_INFO !=
- offsetof(struct trap_per_cpu, fault_info)) ||
- (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
- offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
- (TRAP_PER_CPU_CPU_LIST_PA !=
- offsetof(struct trap_per_cpu, cpu_list_pa)) ||
- (TRAP_PER_CPU_TSB_HUGE !=
- offsetof(struct trap_per_cpu, tsb_huge)) ||
- (TRAP_PER_CPU_TSB_HUGE_TEMP !=
- offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
- (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
- offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
- (TRAP_PER_CPU_CPU_MONDO_QMASK !=
- offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
- (TRAP_PER_CPU_DEV_MONDO_QMASK !=
- offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
- (TRAP_PER_CPU_RESUM_QMASK !=
- offsetof(struct trap_per_cpu, resum_qmask)) ||
- (TRAP_PER_CPU_NONRESUM_QMASK !=
- offsetof(struct trap_per_cpu, nonresum_qmask)))
- trap_per_cpu_offsets_are_bolixed_dave();
-
- if ((TSB_CONFIG_TSB !=
- offsetof(struct tsb_config, tsb)) ||
- (TSB_CONFIG_RSS_LIMIT !=
- offsetof(struct tsb_config, tsb_rss_limit)) ||
- (TSB_CONFIG_NENTRIES !=
- offsetof(struct tsb_config, tsb_nentries)) ||
- (TSB_CONFIG_REG_VAL !=
- offsetof(struct tsb_config, tsb_reg_val)) ||
- (TSB_CONFIG_MAP_VADDR !=
- offsetof(struct tsb_config, tsb_map_vaddr)) ||
- (TSB_CONFIG_MAP_PTE !=
- offsetof(struct tsb_config, tsb_map_pte)))
- tsb_config_offsets_are_bolixed_dave();
+ BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
+ TI_FLAGS != offsetof(struct thread_info, flags) ||
+ TI_CPU != offsetof(struct thread_info, cpu) ||
+ TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
+ TI_KSP != offsetof(struct thread_info, ksp) ||
+ TI_FAULT_ADDR != offsetof(struct thread_info,
+ fault_address) ||
+ TI_KREGS != offsetof(struct thread_info, kregs) ||
+ TI_UTRAPS != offsetof(struct thread_info, utraps) ||
+ TI_EXEC_DOMAIN != offsetof(struct thread_info,
+ exec_domain) ||
+ TI_REG_WINDOW != offsetof(struct thread_info,
+ reg_window) ||
+ TI_RWIN_SPTRS != offsetof(struct thread_info,
+ rwbuf_stkptrs) ||
+ TI_GSR != offsetof(struct thread_info, gsr) ||
+ TI_XFSR != offsetof(struct thread_info, xfsr) ||
+ TI_USER_CNTD0 != offsetof(struct thread_info,
+ user_cntd0) ||
+ TI_USER_CNTD1 != offsetof(struct thread_info,
+ user_cntd1) ||
+ TI_KERN_CNTD0 != offsetof(struct thread_info,
+ kernel_cntd0) ||
+ TI_KERN_CNTD1 != offsetof(struct thread_info,
+ kernel_cntd1) ||
+ TI_PCR != offsetof(struct thread_info, pcr_reg) ||
+ TI_PRE_COUNT != offsetof(struct thread_info,
+ preempt_count) ||
+ TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
+ TI_SYS_NOERROR != offsetof(struct thread_info,
+ syscall_noerror) ||
+ TI_RESTART_BLOCK != offsetof(struct thread_info,
+ restart_block) ||
+ TI_KUNA_REGS != offsetof(struct thread_info,
+ kern_una_regs) ||
+ TI_KUNA_INSN != offsetof(struct thread_info,
+ kern_una_insn) ||
+ TI_FPREGS != offsetof(struct thread_info, fpregs) ||
+ (TI_FPREGS & (64 - 1)));
+
+ BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
+ thread) ||
+ (TRAP_PER_CPU_PGD_PADDR !=
+ offsetof(struct trap_per_cpu, pgd_paddr)) ||
+ (TRAP_PER_CPU_CPU_MONDO_PA !=
+ offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
+ (TRAP_PER_CPU_DEV_MONDO_PA !=
+ offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
+ (TRAP_PER_CPU_RESUM_MONDO_PA !=
+ offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
+ (TRAP_PER_CPU_RESUM_KBUF_PA !=
+ offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
+ (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
+ offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
+ (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
+ offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
+ (TRAP_PER_CPU_FAULT_INFO !=
+ offsetof(struct trap_per_cpu, fault_info)) ||
+ (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
+ offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
+ (TRAP_PER_CPU_CPU_LIST_PA !=
+ offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+ (TRAP_PER_CPU_TSB_HUGE !=
+ offsetof(struct trap_per_cpu, tsb_huge)) ||
+ (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+ offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
+ (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
+ offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
+ (TRAP_PER_CPU_CPU_MONDO_QMASK !=
+ offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
+ (TRAP_PER_CPU_DEV_MONDO_QMASK !=
+ offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
+ (TRAP_PER_CPU_RESUM_QMASK !=
+ offsetof(struct trap_per_cpu, resum_qmask)) ||
+ (TRAP_PER_CPU_NONRESUM_QMASK !=
+ offsetof(struct trap_per_cpu, nonresum_qmask)) ||
+ (TRAP_PER_CPU_PER_CPU_BASE !=
+ offsetof(struct trap_per_cpu, __per_cpu_base)));
+
+ BUILD_BUG_ON((TSB_CONFIG_TSB !=
+ offsetof(struct tsb_config, tsb)) ||
+ (TSB_CONFIG_RSS_LIMIT !=
+ offsetof(struct tsb_config, tsb_rss_limit)) ||
+ (TSB_CONFIG_NENTRIES !=
+ offsetof(struct tsb_config, tsb_nentries)) ||
+ (TSB_CONFIG_REG_VAL !=
+ offsetof(struct tsb_config, tsb_reg_val)) ||
+ (TSB_CONFIG_MAP_VADDR !=
+ offsetof(struct tsb_config, tsb_map_vaddr)) ||
+ (TSB_CONFIG_MAP_PTE !=
+ offsetof(struct tsb_config, tsb_map_pte)));
/* Attach to the address space of init_task. On SMP we
* do this in smp.c:smp_callin for other cpus.
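
The traps_64.c hunk above replaces if-chains that called deliberately undefined *_are_bolixed_dave() helpers with BUILD_BUG_ON() checks that fail directly at compile time. The sketch below shows the same offsetof-versus-hand-maintained-constant check with hypothetical names (MY_BUILD_BUG_ON, my_thread_info, MY_TI_FLAGS) and assumes a 64-bit target where unsigned long is 8 bytes:

#include <stddef.h>

/* Negative-array-size trick: compilation breaks if cond is true. */
#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct my_thread_info {
	unsigned long task;	/* 8 bytes, assuming a 64-bit target */
	unsigned long flags;
};

#define MY_TI_FLAGS 8		/* value a hand-maintained asm-offsets header would carry */

int main(void)
{
	/* Compiles only while the constant and the struct layout agree. */
	MY_BUILD_BUG_ON(MY_TI_FLAGS != offsetof(struct my_thread_info, flags));
	return 0;
}
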
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index cbb282dab5a7..26bb3919ff1f 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -358,6 +358,7 @@ void __init paging_init(void)
protection_map[15] = PAGE_SHARED;
btfixup();
prom_build_devicetree();
+ of_fill_in_cpu_data();
device_scan();
}
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index f26a352c08a0..ca92e2f54e4d 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1679,11 +1679,6 @@ pgd_t swapper_pg_dir[2048];
static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);
-/* Dummy function */
-void __init setup_per_cpu_areas(void)
-{
-}
-
void __init paging_init(void)
{
unsigned long end_pfn, shift, phys_base;
@@ -1799,16 +1794,13 @@ void __init paging_init(void)
if (tlb_type == hypervisor)
sun4v_ktsb_register();
- /* We must setup the per-cpu areas before we pull in the
- * PROM and the MDESC. The code there fills in cpu and
- * other information into per-cpu data structures.
- */
- real_setup_per_cpu_areas();
-
prom_build_devicetree();
+ of_populate_present_mask();
- if (tlb_type == hypervisor)
+ if (tlb_type == hypervisor) {
sun4v_mdesc_init();
+ mdesc_populate_present_mask(cpu_all_mask);
+ }
/* Once the OF device tree and MDESC have been setup, we know
* the list of possible cpus. Therefore we can allocate the
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 06c9a7d98206..ade4eb373bdd 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -19,6 +19,7 @@
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kdebug.h>
+#include <linux/log2.h>
#include <asm/bitext.h>
#include <asm/page.h>
@@ -349,7 +350,7 @@ static void srmmu_free_nocache(unsigned long vaddr, int size)
vaddr, srmmu_nocache_end);
BUG();
}
- if (size & (size-1)) {
+ if (!is_power_of_2(size)) {
printk("Size 0x%x is not a power of 2\n", size);
BUG();
}
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 434ba121e3c5..3b44b47c7e1d 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -360,7 +360,7 @@ static struct platform_driver uml_net_driver = {
static void net_device_release(struct device *dev)
{
- struct uml_net *device = dev->driver_data;
+ struct uml_net *device = dev_get_drvdata(dev);
struct net_device *netdev = device->dev;
struct uml_net_private *lp = netdev_priv(netdev);
@@ -440,7 +440,7 @@ static void eth_configure(int n, void *init, char *mac,
device->pdev.id = n;
device->pdev.name = DRIVER_NAME;
device->pdev.dev.release = net_device_release;
- device->pdev.dev.driver_data = device;
+ dev_set_drvdata(&device->pdev.dev, device);
if (platform_device_register(&device->pdev))
goto out_free_netdev;
SET_NETDEV_DEV(dev,&device->pdev.dev);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index aa9e926e13d7..8f05d4d9da12 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -778,7 +778,7 @@ static int ubd_open_dev(struct ubd *ubd_dev)
static void ubd_device_release(struct device *dev)
{
- struct ubd *ubd_dev = dev->driver_data;
+ struct ubd *ubd_dev = dev_get_drvdata(dev);
blk_cleanup_queue(ubd_dev->queue);
*ubd_dev = ((struct ubd) DEFAULT_UBD);
@@ -807,7 +807,7 @@ static int ubd_disk_register(int major, u64 size, int unit,
ubd_devs[unit].pdev.id = unit;
ubd_devs[unit].pdev.name = DRIVER_NAME;
ubd_devs[unit].pdev.dev.release = ubd_device_release;
- ubd_devs[unit].pdev.dev.driver_data = &ubd_devs[unit];
+ dev_set_drvdata(&ubd_devs[unit].pdev.dev, &ubd_devs[unit]);
platform_device_register(&ubd_devs[unit].pdev);
disk->driverfs_dev = &ubd_devs[unit].pdev.dev;
}
diff --git a/arch/um/include/shared/init.h b/arch/um/include/shared/init.h
index 37dd097c16c0..b3906f860a87 100644
--- a/arch/um/include/shared/init.h
+++ b/arch/um/include/shared/init.h
@@ -27,7 +27,7 @@
* sign followed by value, e.g.:
*
* static int init_variable __initdata = 0;
- * static char linux_logo[] __initdata = { 0x32, 0x36, ... };
+ * static const char linux_logo[] __initconst = { 0x32, 0x36, ... };
*
* Don't forget to initialize data not at file scope, i.e. within a function,
* as gcc otherwise puts the data into the bss section and not into the init
diff --git a/arch/um/include/shared/net_user.h b/arch/um/include/shared/net_user.h
index 63bee158cd8e..3dabbe128e40 100644
--- a/arch/um/include/shared/net_user.h
+++ b/arch/um/include/shared/net_user.h
@@ -8,7 +8,7 @@
#define ETH_ADDR_LEN (6)
#define ETH_HEADER_ETHERTAP (16)
-#define ETH_HEADER_OTHER (14)
+#define ETH_HEADER_OTHER (26) /* 14 for ethernet + VLAN + MPLS for crazy people */
#define ETH_MAX_PACKET (1500)
#define UML_NET_VERSION (4)
diff --git a/arch/um/kernel/init_task.c b/arch/um/kernel/init_task.c
index 806d381947bf..b25121b537d8 100644
--- a/arch/um/kernel/init_task.c
+++ b/arch/um/kernel/init_task.c
@@ -10,11 +10,8 @@
#include "linux/mqueue.h"
#include "asm/uaccess.h"
-struct mm_struct init_mm = INIT_MM(init_mm);
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial task structure.
*
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 336b61569072..454cdb43e351 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -358,7 +358,7 @@ EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
/*
- * hw_interrupt_type must define (startup || enable) &&
+ * irq_chip must define (startup || enable) &&
* (shutdown || disable) && end
*/
static void dummy(unsigned int irq)
@@ -366,7 +366,7 @@ static void dummy(unsigned int irq)
}
/* This is used for everything else than the timer. */
-static struct hw_interrupt_type normal_irq_type = {
+static struct irq_chip normal_irq_type = {
.typename = "SIGIO",
.release = free_irq_by_irq_and_dev,
.disable = dummy,
@@ -375,7 +375,7 @@ static struct hw_interrupt_type normal_irq_type = {
.end = dummy
};
-static struct hw_interrupt_type SIGVTALRM_irq_type = {
+static struct irq_chip SIGVTALRM_irq_type = {
.typename = "SIGVTALRM",
.release = free_irq_by_irq_and_dev,
.shutdown = dummy, /* never called */
diff --git a/arch/um/sys-i386/stub.S b/arch/um/sys-i386/stub.S
index c41b04bf5fa0..54a36ec20cb7 100644
--- a/arch/um/sys-i386/stub.S
+++ b/arch/um/sys-i386/stub.S
@@ -1,7 +1,7 @@
#include "as-layout.h"
.globl syscall_stub
-.section .__syscall_stub, "x"
+.section .__syscall_stub, "ax"
.globl batch_syscall_stub
batch_syscall_stub:
diff --git a/arch/um/sys-x86_64/asm/elf.h b/arch/um/sys-x86_64/asm/elf.h
index 6e8a9195e952..04b9e87c8dad 100644
--- a/arch/um/sys-x86_64/asm/elf.h
+++ b/arch/um/sys-x86_64/asm/elf.h
@@ -66,28 +66,28 @@ typedef struct user_i387_struct elf_fpregset_t;
PT_REGS_R15(regs) = 0; \
} while (0)
-#define ELF_CORE_COPY_REGS(pr_reg, regs) \
- (pr_reg)[0] = (regs)->regs.gp[0]; \
- (pr_reg)[1] = (regs)->regs.gp[1]; \
- (pr_reg)[2] = (regs)->regs.gp[2]; \
- (pr_reg)[3] = (regs)->regs.gp[3]; \
- (pr_reg)[4] = (regs)->regs.gp[4]; \
- (pr_reg)[5] = (regs)->regs.gp[5]; \
- (pr_reg)[6] = (regs)->regs.gp[6]; \
- (pr_reg)[7] = (regs)->regs.gp[7]; \
- (pr_reg)[8] = (regs)->regs.gp[8]; \
- (pr_reg)[9] = (regs)->regs.gp[9]; \
- (pr_reg)[10] = (regs)->regs.gp[10]; \
- (pr_reg)[11] = (regs)->regs.gp[11]; \
- (pr_reg)[12] = (regs)->regs.gp[12]; \
- (pr_reg)[13] = (regs)->regs.gp[13]; \
- (pr_reg)[14] = (regs)->regs.gp[14]; \
- (pr_reg)[15] = (regs)->regs.gp[15]; \
- (pr_reg)[16] = (regs)->regs.gp[16]; \
- (pr_reg)[17] = (regs)->regs.gp[17]; \
- (pr_reg)[18] = (regs)->regs.gp[18]; \
- (pr_reg)[19] = (regs)->regs.gp[19]; \
- (pr_reg)[20] = (regs)->regs.gp[20]; \
+#define ELF_CORE_COPY_REGS(pr_reg, _regs) \
+ (pr_reg)[0] = (_regs)->regs.gp[0]; \
+ (pr_reg)[1] = (_regs)->regs.gp[1]; \
+ (pr_reg)[2] = (_regs)->regs.gp[2]; \
+ (pr_reg)[3] = (_regs)->regs.gp[3]; \
+ (pr_reg)[4] = (_regs)->regs.gp[4]; \
+ (pr_reg)[5] = (_regs)->regs.gp[5]; \
+ (pr_reg)[6] = (_regs)->regs.gp[6]; \
+ (pr_reg)[7] = (_regs)->regs.gp[7]; \
+ (pr_reg)[8] = (_regs)->regs.gp[8]; \
+ (pr_reg)[9] = (_regs)->regs.gp[9]; \
+ (pr_reg)[10] = (_regs)->regs.gp[10]; \
+ (pr_reg)[11] = (_regs)->regs.gp[11]; \
+ (pr_reg)[12] = (_regs)->regs.gp[12]; \
+ (pr_reg)[13] = (_regs)->regs.gp[13]; \
+ (pr_reg)[14] = (_regs)->regs.gp[14]; \
+ (pr_reg)[15] = (_regs)->regs.gp[15]; \
+ (pr_reg)[16] = (_regs)->regs.gp[16]; \
+ (pr_reg)[17] = (_regs)->regs.gp[17]; \
+ (pr_reg)[18] = (_regs)->regs.gp[18]; \
+ (pr_reg)[19] = (_regs)->regs.gp[19]; \
+ (pr_reg)[20] = (_regs)->regs.gp[20]; \
(pr_reg)[21] = current->thread.arch.fs; \
(pr_reg)[22] = 0; \
(pr_reg)[23] = 0; \
diff --git a/arch/um/sys-x86_64/stub.S b/arch/um/sys-x86_64/stub.S
index 6d9edf9fabce..20e4a96a6dcb 100644
--- a/arch/um/sys-x86_64/stub.S
+++ b/arch/um/sys-x86_64/stub.S
@@ -1,7 +1,7 @@
#include "as-layout.h"
.globl syscall_stub
-.section .__syscall_stub, "x"
+.section .__syscall_stub, "ax"
syscall_stub:
syscall
/* We don't have 64-bit constants, so this constructs the address
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 356d2ec8e2fb..cf42fc305419 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -46,6 +46,7 @@ config X86
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
+ select HAVE_ARCH_KMEMCHECK
config OUTPUT_FORMAT
string
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index edbd0ca62067..1b68659c41b4 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -81,6 +81,11 @@ ifdef CONFIG_CC_STACKPROTECTOR
endif
endif
+# Don't unroll struct assignments with kmemcheck enabled
+ifeq ($(CONFIG_KMEMCHECK),y)
+ KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
+endif
+
# Stackpointer is addressed different for 32 bit and 64 bit x86
sp-$(CONFIG_X86_32) := esp
sp-$(CONFIG_X86_64) := rsp
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index f82fdc412c64..b93405b228b4 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -6,6 +6,7 @@
* Documentation/DMA-API.txt for documentation.
*/
+#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
@@ -60,6 +61,7 @@ dma_map_single(struct device *hwdev, void *ptr, size_t size,
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
+ kmemcheck_mark_initialized(ptr, size);
addr = ops->map_page(hwdev, virt_to_page(ptr),
(unsigned long)ptr & ~PAGE_MASK, size,
dir, NULL);
@@ -87,8 +89,12 @@ dma_map_sg(struct device *hwdev, struct scatterlist *sg,
{
struct dma_map_ops *ops = get_dma_ops(hwdev);
int ents;
+ struct scatterlist *s;
+ int i;
BUG_ON(!valid_dma_direction(dir));
+ for_each_sg(sg, s, nents, i)
+ kmemcheck_mark_initialized(sg_virt(s), s->length);
ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
debug_dma_map_sg(hwdev, sg, nents, ents, dir);
@@ -200,6 +206,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
+ kmemcheck_mark_initialized(page_address(page) + offset, size);
addr = ops->map_page(dev, page, offset, size, dir, NULL);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
diff --git a/arch/x86/include/asm/kmap_types.h b/arch/x86/include/asm/kmap_types.h
index 5759c165a5cf..9e00a731a7fb 100644
--- a/arch/x86/include/asm/kmap_types.h
+++ b/arch/x86/include/asm/kmap_types.h
@@ -2,28 +2,11 @@
#define _ASM_X86_KMAP_TYPES_H
#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM)
-# define D(n) __KM_FENCE_##n ,
-#else
-# define D(n)
+#define __WITH_KM_FENCE
#endif
-enum km_type {
-D(0) KM_BOUNCE_READ,
-D(1) KM_SKB_SUNRPC_DATA,
-D(2) KM_SKB_DATA_SOFTIRQ,
-D(3) KM_USER0,
-D(4) KM_USER1,
-D(5) KM_BIO_SRC_IRQ,
-D(6) KM_BIO_DST_IRQ,
-D(7) KM_PTE0,
-D(8) KM_PTE1,
-D(9) KM_IRQ0,
-D(10) KM_IRQ1,
-D(11) KM_SOFTIRQ0,
-D(12) KM_SOFTIRQ1,
-D(13) KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
-#undef D
+#undef __WITH_KM_FENCE
#endif /* _ASM_X86_KMAP_TYPES_H */
diff --git a/arch/x86/include/asm/kmemcheck.h b/arch/x86/include/asm/kmemcheck.h
new file mode 100644
index 000000000000..ed01518f297e
--- /dev/null
+++ b/arch/x86/include/asm/kmemcheck.h
@@ -0,0 +1,42 @@
+#ifndef ASM_X86_KMEMCHECK_H
+#define ASM_X86_KMEMCHECK_H
+
+#include <linux/types.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_KMEMCHECK
+bool kmemcheck_active(struct pt_regs *regs);
+
+void kmemcheck_show(struct pt_regs *regs);
+void kmemcheck_hide(struct pt_regs *regs);
+
+bool kmemcheck_fault(struct pt_regs *regs,
+ unsigned long address, unsigned long error_code);
+bool kmemcheck_trap(struct pt_regs *regs);
+#else
+static inline bool kmemcheck_active(struct pt_regs *regs)
+{
+ return false;
+}
+
+static inline void kmemcheck_show(struct pt_regs *regs)
+{
+}
+
+static inline void kmemcheck_hide(struct pt_regs *regs)
+{
+}
+
+static inline bool kmemcheck_fault(struct pt_regs *regs,
+ unsigned long address, unsigned long error_code)
+{
+ return false;
+}
+
+static inline bool kmemcheck_trap(struct pt_regs *regs)
+{
+ return false;
+}
+#endif /* CONFIG_KMEMCHECK */
+
+#endif
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 18ef7ebf2631..3cc06e3fceb8 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -317,6 +317,11 @@ static inline int pte_present(pte_t a)
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
+static inline int pte_hidden(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_HIDDEN;
+}
+
static inline int pmd_present(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_PRESENT;
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 4d258ad76a0f..54cb697f4900 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -18,7 +18,7 @@
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
-#define _PAGE_BIT_UNUSED3 11
+#define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
@@ -41,13 +41,18 @@
#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
-#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL
+#ifdef CONFIG_KMEMCHECK
+#define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+#else
+#define _PAGE_HIDDEN (_AT(pteval_t, 0))
+#endif
+
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
index 0e0e3ba827f7..c86f452256de 100644
--- a/arch/x86/include/asm/string_32.h
+++ b/arch/x86/include/asm/string_32.h
@@ -177,10 +177,18 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
* No 3D Now!
*/
+#ifndef CONFIG_KMEMCHECK
#define memcpy(t, f, n) \
(__builtin_constant_p((n)) \
? __constant_memcpy((t), (f), (n)) \
: __memcpy((t), (f), (n)))
+#else
+/*
+ * kmemcheck becomes very happy if we use the REP instructions unconditionally,
+ * because it means that we know both memory operands in advance.
+ */
+#define memcpy(t, f, n) __memcpy((t), (f), (n))
+#endif
#endif
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 2afe164bf1e6..19e2c468fc2c 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -27,6 +27,7 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
function. */
#define __HAVE_ARCH_MEMCPY 1
+#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4
extern void *memcpy(void *to, const void *from, size_t len);
#else
@@ -42,6 +43,13 @@ extern void *__memcpy(void *to, const void *from, size_t len);
__ret; \
})
#endif
+#else
+/*
+ * kmemcheck becomes very happy if we use the REP instructions unconditionally,
+ * because it means that we know both memory operands in advance.
+ */
+#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
+#endif
#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 602c769fc98c..b0783520988b 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -154,9 +154,9 @@ struct thread_info {
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
-#define THREAD_FLAGS (GFP_KERNEL | __GFP_ZERO)
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
#else
-#define THREAD_FLAGS GFP_KERNEL
+#define THREAD_FLAGS (GFP_KERNEL | __GFP_NOTRACK)
#endif
#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
diff --git a/arch/x86/include/asm/timex.h b/arch/x86/include/asm/timex.h
index b5c9d45c981f..1375cfc93960 100644
--- a/arch/x86/include/asm/timex.h
+++ b/arch/x86/include/asm/timex.h
@@ -4,9 +4,7 @@
#include <asm/processor.h>
#include <asm/tsc.h>
-/* The PIT ticks at this frequency (in HZ): */
-#define PIT_TICK_RATE 1193182
-
+/* Assume we use the PIT time source for the clock tick */
#define CLOCK_TICK_RATE PIT_TICK_RATE
#define ARCH_HAS_READ_CURRENT_TIMER
diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h
index 11b3bb86e17b..7fcf6f3dbcc3 100644
--- a/arch/x86/include/asm/xor.h
+++ b/arch/x86/include/asm/xor.h
@@ -1,5 +1,10 @@
+#ifdef CONFIG_KMEMCHECK
+/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */
+# include <asm-generic/xor.h>
+#else
#ifdef CONFIG_X86_32
# include "xor_32.h"
#else
# include "xor_64.h"
#endif
+#endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 3ffdcfa9abdf..9fa33886c0d7 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -487,7 +487,6 @@ out:
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
char *v = c->x86_vendor_id;
- static int printed;
int i;
for (i = 0; i < X86_VENDOR_NUM; i++) {
@@ -504,13 +503,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
}
}
- if (!printed) {
- printed++;
- printk(KERN_ERR
- "CPU: vendor_id '%s' unknown, using generic init.\n", v);
-
- printk(KERN_ERR "CPU: Your system may be unstable.\n");
- }
+ printk_once(KERN_ERR
+ "CPU: vendor_id '%s' unknown, using generic init.\n" \
+ "CPU: Your system may be unstable.\n", v);
c->x86_vendor = X86_VENDOR_UNKNOWN;
this_cpu = &default_cpu;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index daed39ba2614..3260ab044996 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -86,6 +86,29 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
*/
if (c->x86 == 6 && c->x86_model < 15)
clear_cpu_cap(c, X86_FEATURE_PAT);
+
+#ifdef CONFIG_KMEMCHECK
+ /*
+ * P4s have a "fast strings" feature which causes single-
+ * stepping REP instructions to only generate a #DB on
+ * cache-line boundaries.
+ *
+ * Ingo Molnar reported a Pentium D (model 6) and a Xeon
+ * (model 2) with the same problem.
+ */
+ if (c->x86 == 15) {
+ u64 misc_enable;
+
+ rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+
+ if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
+ printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");
+
+ misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
+ wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+ }
+ }
+#endif
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 2ac1f0c2beb3..b07af8861244 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -182,6 +182,11 @@ static struct notifier_block __refdata cpuid_class_cpu_notifier =
.notifier_call = cpuid_class_cpu_callback,
};
+static char *cpuid_nodename(struct device *dev)
+{
+ return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt));
+}
+
static int __init cpuid_init(void)
{
int i, err = 0;
@@ -198,6 +203,7 @@ static int __init cpuid_init(void)
err = PTR_ERR(cpuid_class);
goto out_chrdev;
}
+ cpuid_class->nodename = cpuid_nodename;
for_each_online_cpu(i) {
err = cpuid_device_create(i);
if (err != 0)
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index c2e0bb0890d4..5cf36c053ac4 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -7,6 +7,7 @@
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
+#include <linux/timex.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index df3bf269beab..270ff83efc11 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -12,7 +12,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
/*
* Initial thread structure.
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 9c4461501fcb..9371448290ac 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -236,6 +236,7 @@ static const struct file_operations microcode_fops = {
static struct miscdevice microcode_dev = {
.minor = MICROCODE_MINOR,
.name = "microcode",
+ .devnode = "cpu/microcode",
.fops = &microcode_fops,
};
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 3cf3413ec626..98fd6cd4e3a4 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -196,6 +196,11 @@ static struct notifier_block __refdata msr_class_cpu_notifier = {
.notifier_call = msr_class_cpu_callback,
};
+static char *msr_nodename(struct device *dev)
+{
+ return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt));
+}
+
static int __init msr_init(void)
{
int i, err = 0;
@@ -212,6 +217,7 @@ static int __init msr_init(void)
err = PTR_ERR(msr_class);
goto out_chrdev;
}
+ msr_class->nodename = msr_nodename;
for_each_online_cpu(i) {
err = msr_device_create(i);
if (err != 0)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 3bb2be1649bd..994dd6a4a2a0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -63,7 +63,7 @@ void arch_task_cache_init(void)
task_xstate_cachep =
kmem_cache_create("task_xstate", xstate_size,
__alignof__(union thread_xstate),
- SLAB_PANIC, NULL);
+ SLAB_PANIC | SLAB_NOTRACK, NULL);
}
/*
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 4aaf7e48394f..c3eb207181fe 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -77,6 +77,13 @@ void save_stack_trace(struct stack_trace *trace)
}
EXPORT_SYMBOL_GPL(save_stack_trace);
+void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp)
+{
+ dump_trace(current, NULL, NULL, bp, &save_stack_ops, trace);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 1e1e27b7d438..5f935f0d5861 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -45,6 +45,7 @@
#include <linux/edac.h>
#endif
+#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
@@ -534,6 +535,10 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
get_debugreg(condition, 6);
+ /* Catch kmemcheck conditions first of all! */
+ if (condition & DR_STEP && kmemcheck_trap(regs))
+ return;
+
/*
* The processor cleared BTF, so don't mark that we need it set.
*/
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 3e1c057e98fe..ae3180c506a6 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
+#include <linux/timex.h>
#include <asm/hpet.h>
#include <asm/timer.h>
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 32d6ae8fb60e..e770bf349ec4 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1277,7 +1277,7 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
struct page *pages;
struct vmcs *vmcs;
- pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
+ pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
if (!pages)
return NULL;
vmcs = page_address(pages);
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index fdd30d08ab52..eefdeee8a871 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -10,6 +10,8 @@ obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o
obj-$(CONFIG_HIGHMEM) += highmem_32.o
+obj-$(CONFIG_KMEMCHECK) += kmemcheck/
+
obj-$(CONFIG_MMIOTRACE) += mmiotrace.o
mmiotrace-y := kmmio.o pf_in.o mmio-mod.o
obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c6acc6326374..baa0e86adfbc 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -14,6 +14,7 @@
#include <asm/traps.h> /* dotraplinkage, ... */
#include <asm/pgalloc.h> /* pgd_*(), ... */
+#include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
/*
* Page fault error code bits:
@@ -956,6 +957,13 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
/* Get the faulting address: */
address = read_cr2();
+ /*
+ * Detect and handle instructions that would cause a page fault for
+ * both a tracked kernel page and a userspace page.
+ */
+ if (kmemcheck_active(regs))
+ kmemcheck_hide(regs);
+
if (unlikely(kmmio_fault(regs, address)))
return;
@@ -973,9 +981,13 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
* protection error (error_code & 9) == 0.
*/
if (unlikely(fault_in_kernel_space(address))) {
- if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
- vmalloc_fault(address) >= 0)
- return;
+ if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
+ if (vmalloc_fault(address) >= 0)
+ return;
+
+ if (kmemcheck_fault(regs, address, error_code))
+ return;
+ }
/* Can handle a stale RO->RW TLB: */
if (spurious_fault(error_code, address))
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 34c1bfb64f1c..f53b57e4086f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -213,7 +213,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
if (!after_bootmem)
init_gbpages();
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
/*
* For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
* This will simplify cpa(), which otherwise needs to support splitting
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 9ff3c0816d15..3cd7711bb949 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -111,7 +111,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
pte_t *page_table = NULL;
if (after_bootmem) {
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
if (!page_table)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 52bb9519bb86..9c543290a813 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -104,7 +104,7 @@ static __ref void *spp_getpage(void)
void *ptr;
if (after_bootmem)
- ptr = (void *) get_zeroed_page(GFP_ATOMIC);
+ ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
else
ptr = alloc_bootmem_pages(PAGE_SIZE);
@@ -281,7 +281,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
void *adr;
if (after_bootmem) {
- adr = (void *)get_zeroed_page(GFP_ATOMIC);
+ adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
*phys = __pa(adr);
return adr;
diff --git a/arch/x86/mm/kmemcheck/Makefile b/arch/x86/mm/kmemcheck/Makefile
new file mode 100644
index 000000000000..520b3bce4095
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/Makefile
@@ -0,0 +1 @@
+obj-y := error.o kmemcheck.o opcode.o pte.o selftest.o shadow.o
diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c
new file mode 100644
index 000000000000..4901d0dafda6
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/error.c
@@ -0,0 +1,228 @@
+#include <linux/interrupt.h>
+#include <linux/kdebug.h>
+#include <linux/kmemcheck.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/stacktrace.h>
+#include <linux/string.h>
+
+#include "error.h"
+#include "shadow.h"
+
+enum kmemcheck_error_type {
+ KMEMCHECK_ERROR_INVALID_ACCESS,
+ KMEMCHECK_ERROR_BUG,
+};
+
+#define SHADOW_COPY_SIZE (1 << CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT)
+
+struct kmemcheck_error {
+ enum kmemcheck_error_type type;
+
+ union {
+ /* KMEMCHECK_ERROR_INVALID_ACCESS */
+ struct {
+ /* Kind of access that caused the error */
+ enum kmemcheck_shadow state;
+ /* Address and size of the erroneous read */
+ unsigned long address;
+ unsigned int size;
+ };
+ };
+
+ struct pt_regs regs;
+ struct stack_trace trace;
+ unsigned long trace_entries[32];
+
+ /* We compress it to a char. */
+ unsigned char shadow_copy[SHADOW_COPY_SIZE];
+ unsigned char memory_copy[SHADOW_COPY_SIZE];
+};
+
+/*
+ * Create a ring queue of errors to output. We can't call printk() directly
+ * from the kmemcheck traps, since this may call the console drivers and
+ * result in a recursive fault.
+ */
+static struct kmemcheck_error error_fifo[CONFIG_KMEMCHECK_QUEUE_SIZE];
+static unsigned int error_count;
+static unsigned int error_rd;
+static unsigned int error_wr;
+static unsigned int error_missed_count;
+
+static struct kmemcheck_error *error_next_wr(void)
+{
+ struct kmemcheck_error *e;
+
+ if (error_count == ARRAY_SIZE(error_fifo)) {
+ ++error_missed_count;
+ return NULL;
+ }
+
+ e = &error_fifo[error_wr];
+ if (++error_wr == ARRAY_SIZE(error_fifo))
+ error_wr = 0;
+ ++error_count;
+ return e;
+}
+
+static struct kmemcheck_error *error_next_rd(void)
+{
+ struct kmemcheck_error *e;
+
+ if (error_count == 0)
+ return NULL;
+
+ e = &error_fifo[error_rd];
+ if (++error_rd == ARRAY_SIZE(error_fifo))
+ error_rd = 0;
+ --error_count;
+ return e;
+}
+
+void kmemcheck_error_recall(void)
+{
+ static const char *desc[] = {
+ [KMEMCHECK_SHADOW_UNALLOCATED] = "unallocated",
+ [KMEMCHECK_SHADOW_UNINITIALIZED] = "uninitialized",
+ [KMEMCHECK_SHADOW_INITIALIZED] = "initialized",
+ [KMEMCHECK_SHADOW_FREED] = "freed",
+ };
+
+ static const char short_desc[] = {
+ [KMEMCHECK_SHADOW_UNALLOCATED] = 'a',
+ [KMEMCHECK_SHADOW_UNINITIALIZED] = 'u',
+ [KMEMCHECK_SHADOW_INITIALIZED] = 'i',
+ [KMEMCHECK_SHADOW_FREED] = 'f',
+ };
+
+ struct kmemcheck_error *e;
+ unsigned int i;
+
+ e = error_next_rd();
+ if (!e)
+ return;
+
+ switch (e->type) {
+ case KMEMCHECK_ERROR_INVALID_ACCESS:
+ printk(KERN_ERR "WARNING: kmemcheck: Caught %d-bit read "
+ "from %s memory (%p)\n",
+ 8 * e->size, e->state < ARRAY_SIZE(desc) ?
+ desc[e->state] : "(invalid shadow state)",
+ (void *) e->address);
+
+ printk(KERN_INFO);
+ for (i = 0; i < SHADOW_COPY_SIZE; ++i)
+ printk("%02x", e->memory_copy[i]);
+ printk("\n");
+
+ printk(KERN_INFO);
+ for (i = 0; i < SHADOW_COPY_SIZE; ++i) {
+ if (e->shadow_copy[i] < ARRAY_SIZE(short_desc))
+ printk(" %c", short_desc[e->shadow_copy[i]]);
+ else
+ printk(" ?");
+ }
+ printk("\n");
+ printk(KERN_INFO "%*c\n", 2 + 2
+ * (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^');
+ break;
+ case KMEMCHECK_ERROR_BUG:
+ printk(KERN_EMERG "ERROR: kmemcheck: Fatal error\n");
+ break;
+ }
+
+ __show_regs(&e->regs, 1);
+ print_stack_trace(&e->trace, 0);
+}
+
+static void do_wakeup(unsigned long data)
+{
+ while (error_count > 0)
+ kmemcheck_error_recall();
+
+ if (error_missed_count > 0) {
+ printk(KERN_WARNING "kmemcheck: Lost %d error reports because "
+ "the queue was too small\n", error_missed_count);
+ error_missed_count = 0;
+ }
+}
+
+static DECLARE_TASKLET(kmemcheck_tasklet, &do_wakeup, 0);
+
+/*
+ * Save the context of an error report.
+ */
+void kmemcheck_error_save(enum kmemcheck_shadow state,
+ unsigned long address, unsigned int size, struct pt_regs *regs)
+{
+ static unsigned long prev_ip;
+
+ struct kmemcheck_error *e;
+ void *shadow_copy;
+ void *memory_copy;
+
+ /* Don't report several adjacent errors from the same EIP. */
+ if (regs->ip == prev_ip)
+ return;
+ prev_ip = regs->ip;
+
+ e = error_next_wr();
+ if (!e)
+ return;
+
+ e->type = KMEMCHECK_ERROR_INVALID_ACCESS;
+
+ e->state = state;
+ e->address = address;
+ e->size = size;
+
+ /* Save regs */
+ memcpy(&e->regs, regs, sizeof(*regs));
+
+ /* Save stack trace */
+ e->trace.nr_entries = 0;
+ e->trace.entries = e->trace_entries;
+ e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
+ e->trace.skip = 0;
+ save_stack_trace_bp(&e->trace, regs->bp);
+
+ /* Round address down to nearest 16 bytes */
+ shadow_copy = kmemcheck_shadow_lookup(address
+ & ~(SHADOW_COPY_SIZE - 1));
+ BUG_ON(!shadow_copy);
+
+ memcpy(e->shadow_copy, shadow_copy, SHADOW_COPY_SIZE);
+
+ kmemcheck_show_addr(address);
+ memory_copy = (void *) (address & ~(SHADOW_COPY_SIZE - 1));
+ memcpy(e->memory_copy, memory_copy, SHADOW_COPY_SIZE);
+ kmemcheck_hide_addr(address);
+
+ tasklet_hi_schedule_first(&kmemcheck_tasklet);
+}
+
+/*
+ * Save the context of a kmemcheck bug.
+ */
+void kmemcheck_error_save_bug(struct pt_regs *regs)
+{
+ struct kmemcheck_error *e;
+
+ e = error_next_wr();
+ if (!e)
+ return;
+
+ e->type = KMEMCHECK_ERROR_BUG;
+
+ memcpy(&e->regs, regs, sizeof(*regs));
+
+ e->trace.nr_entries = 0;
+ e->trace.entries = e->trace_entries;
+ e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
+ e->trace.skip = 1;
+ save_stack_trace(&e->trace);
+
+ tasklet_hi_schedule_first(&kmemcheck_tasklet);
+}
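
The new error.c above defers all reporting because, as its comment notes, printk() from the kmemcheck traps could re-enter the console drivers and fault recursively: the trap path only appends to a fixed-size FIFO (error_next_wr), and a tasklet later drains it (error_next_rd via kmemcheck_error_recall). A compact user-space sketch of that produce-then-drain FIFO, with a hypothetical struct report payload and main() standing in for the tasklet, is:

#include <stdio.h>

#define QUEUE_SIZE 64

struct report { unsigned long addr; };

static struct report fifo[QUEUE_SIZE];
static unsigned int fifo_count, fifo_rd, fifo_wr, fifo_missed;

/* Producer side: would be called from the restricted (trap) context. */
static struct report *fifo_next_wr(void)
{
	struct report *r;

	if (fifo_count == QUEUE_SIZE) {
		++fifo_missed;		/* queue full: drop and count it */
		return NULL;
	}
	r = &fifo[fifo_wr];
	if (++fifo_wr == QUEUE_SIZE)
		fifo_wr = 0;
	++fifo_count;
	return r;
}

/* Consumer side: would run from the tasklet, where printing is safe. */
static struct report *fifo_next_rd(void)
{
	struct report *r;

	if (fifo_count == 0)
		return NULL;
	r = &fifo[fifo_rd];
	if (++fifo_rd == QUEUE_SIZE)
		fifo_rd = 0;
	--fifo_count;
	return r;
}

int main(void)
{
	struct report *r = fifo_next_wr();

	if (r)
		r->addr = 0xdeadbeef;	/* record a report in the queue */

	while ((r = fifo_next_rd()) != NULL)
		printf("report: addr=0x%lx\n", r->addr);

	if (fifo_missed)
		printf("lost %u reports\n", fifo_missed);
	return 0;
}
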
diff --git a/arch/x86/mm/kmemcheck/error.h b/arch/x86/mm/kmemcheck/error.h
new file mode 100644
index 000000000000..0efc2e8d0a20
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/error.h
@@ -0,0 +1,15 @@
+#ifndef ARCH__X86__MM__KMEMCHECK__ERROR_H
+#define ARCH__X86__MM__KMEMCHECK__ERROR_H
+
+#include <linux/ptrace.h>
+
+#include "shadow.h"
+
+void kmemcheck_error_save(enum kmemcheck_shadow state,
+ unsigned long address, unsigned int size, struct pt_regs *regs);
+
+void kmemcheck_error_save_bug(struct pt_regs *regs);
+
+void kmemcheck_error_recall(void);
+
+#endif
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
new file mode 100644
index 000000000000..2c55ed098654
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -0,0 +1,640 @@
+/**
+ * kmemcheck - a heavyweight memory checker for the linux kernel
+ * Copyright (C) 2007, 2008 Vegard Nossum <vegardno@ifi.uio.no>
+ * (With a lot of help from Ingo Molnar and Pekka Enberg.)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/kmemcheck.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/page-flags.h>
+#include <linux/percpu.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/cacheflush.h>
+#include <asm/kmemcheck.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+#include "error.h"
+#include "opcode.h"
+#include "pte.h"
+#include "selftest.h"
+#include "shadow.h"
+
+
+#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT
+# define KMEMCHECK_ENABLED 0
+#endif
+
+#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT
+# define KMEMCHECK_ENABLED 1
+#endif
+
+#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT
+# define KMEMCHECK_ENABLED 2
+#endif
+
+int kmemcheck_enabled = KMEMCHECK_ENABLED;
+
+int __init kmemcheck_init(void)
+{
+#ifdef CONFIG_SMP
+ /*
+ * Limit SMP to use a single CPU. We rely on the fact that this code
+ * runs before SMP is set up.
+ */
+ if (setup_max_cpus > 1) {
+ printk(KERN_INFO
+ "kmemcheck: Limiting number of CPUs to 1.\n");
+ setup_max_cpus = 1;
+ }
+#endif
+
+ if (!kmemcheck_selftest()) {
+ printk(KERN_INFO "kmemcheck: self-tests failed; disabling\n");
+ kmemcheck_enabled = 0;
+ return -EINVAL;
+ }
+
+ printk(KERN_INFO "kmemcheck: Initialized\n");
+ return 0;
+}
+
+early_initcall(kmemcheck_init);
+
+/*
+ * We need to parse the kmemcheck= option before any memory is allocated.
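+ * Accepted values follow kmemcheck_enabled: 0 (disabled), 1 (enabled)
+ * and 2 (one-shot, i.e. disable again after the first report).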
+ */
+static int __init param_kmemcheck(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ sscanf(str, "%d", &kmemcheck_enabled);
+ return 0;
+}
+
+early_param("kmemcheck", param_kmemcheck);
+
+int kmemcheck_show_addr(unsigned long address)
+{
+ pte_t *pte;
+
+ pte = kmemcheck_pte_lookup(address);
+ if (!pte)
+ return 0;
+
+ set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+ __flush_tlb_one(address);
+ return 1;
+}
+
+int kmemcheck_hide_addr(unsigned long address)
+{
+ pte_t *pte;
+
+ pte = kmemcheck_pte_lookup(address);
+ if (!pte)
+ return 0;
+
+ set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+ __flush_tlb_one(address);
+ return 1;
+}
+
+struct kmemcheck_context {
+ bool busy;
+ int balance;
+
+ /*
+ * There can be at most two memory operands to an instruction, but
+ * each address can cross a page boundary -- so we may need up to
+ * four addresses that must be hidden/revealed for each fault.
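+ *
+ * For example, a "movsw" whose source and destination both start on
+ * the last byte of a page touches two pages per operand, i.e. four
+ * pages in total.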
+ */
+ unsigned long addr[4];
+ unsigned long n_addrs;
+ unsigned long flags;
+
+ /* Data size of the instruction that caused a fault. */
+ unsigned int size;
+};
+
+static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context);
+
+bool kmemcheck_active(struct pt_regs *regs)
+{
+ struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+
+ return data->balance > 0;
+}
+
+/* Save an address that needs to be shown/hidden */
+static void kmemcheck_save_addr(unsigned long addr)
+{
+ struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+
+ BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr));
+ data->addr[data->n_addrs++] = addr;
+}
+
+static unsigned int kmemcheck_show_all(void)
+{
+ struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ unsigned int i;
+ unsigned int n;
+
+ n = 0;
+ for (i = 0; i < data->n_addrs; ++i)
+ n += kmemcheck_show_addr(data->addr[i]);
+
+ return n;
+}
+
+static unsigned int kmemcheck_hide_all(void)
+{
+ struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ unsigned int i;
+ unsigned int n;
+
+ n = 0;
+ for (i = 0; i < data->n_addrs; ++i)
+ n += kmemcheck_hide_addr(data->addr[i]);
+
+ return n;
+}
+
+/*
+ * Called from the #PF handler.
+ */
+void kmemcheck_show(struct pt_regs *regs)
+{
+ struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+
+ BUG_ON(!irqs_disabled());
+
+ if (unlikely(data->balance != 0)) {
+ kmemcheck_show_all();
+ kmemcheck_error_save_bug(regs);
+ data->balance = 0;
+ return;
+ }
+
+ /*
+ * None of the addresses actually belonged to kmemcheck. Note that
+ * this is not an error.
+ */
+ if (kmemcheck_show_all() == 0)
+ return;
+
+ ++data->balance;
+
+ /*
+ * The IF needs to be cleared as well, so that the faulting
+ * instruction can run "uninterrupted". Otherwise, we might take
+ * an interrupt and start executing that before we've had a chance
+ * to hide the page again.
+ *
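+ * Setting TF single-steps the faulting instruction; the resulting
+ * #DB trap ends up in kmemcheck_hide(), which hides the page(s)
+ * again and restores the saved flags.
+ *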
+ * NOTE: In the rare case of multiple faults, we must not override
+ * the original flags:
+ */
+ if (!(regs->flags & X86_EFLAGS_TF))
+ data->flags = regs->flags;
+
+ regs->flags |= X86_EFLAGS_TF;
+ regs->flags &= ~X86_EFLAGS_IF;
+}
+
+/*
+ * Called from the #DB handler.
+ */
+void kmemcheck_hide(struct pt_regs *regs)
+{
+ struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ int n;
+
+ BUG_ON(!irqs_disabled());
+
+ if (data->balance == 0)
+ return;
+
+ if (unlikely(data->balance != 1)) {
+ kmemcheck_show_all();
+ kmemcheck_error_save_bug(regs);
+ data->n_addrs = 0;
+ data->balance = 0;
+
+ if (!(data->flags & X86_EFLAGS_TF))
+ regs->flags &= ~X86_EFLAGS_TF;
+ if (data->flags & X86_EFLAGS_IF)
+ regs->flags |= X86_EFLAGS_IF;
+ return;
+ }
+
+ if (kmemcheck_enabled)
+ n = kmemcheck_hide_all();
+ else
+ n = kmemcheck_show_all();
+
+ if (n == 0)
+ return;
+
+ --data->balance;
+
+ data->n_addrs = 0;
+
+ if (!(data->flags & X86_EFLAGS_TF))
+ regs->flags &= ~X86_EFLAGS_TF;
+ if (data->flags & X86_EFLAGS_IF)
+ regs->flags |= X86_EFLAGS_IF;
+}
+
+void kmemcheck_show_pages(struct page *p, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; ++i) {
+ unsigned long address;
+ pte_t *pte;
+ unsigned int level;
+
+ address = (unsigned long) page_address(&p[i]);
+ pte = lookup_address(address, &level);
+ BUG_ON(!pte);
+ BUG_ON(level != PG_LEVEL_4K);
+
+ set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+ set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_HIDDEN));
+ __flush_tlb_one(address);
+ }
+}
+
+bool kmemcheck_page_is_tracked(struct page *p)
+{
+ /* This will also check the "hidden" flag of the PTE. */
+ return kmemcheck_pte_lookup((unsigned long) page_address(p));
+}
+
+void kmemcheck_hide_pages(struct page *p, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; ++i) {
+ unsigned long address;
+ pte_t *pte;
+ unsigned int level;
+
+ address = (unsigned long) page_address(&p[i]);
+ pte = lookup_address(address, &level);
+ BUG_ON(!pte);
+ BUG_ON(level != PG_LEVEL_4K);
+
+ set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+ set_pte(pte, __pte(pte_val(*pte) | _PAGE_HIDDEN));
+ __flush_tlb_one(address);
+ }
+}
+
+/* Access may NOT cross page boundary */
+static void kmemcheck_read_strict(struct pt_regs *regs,
+ unsigned long addr, unsigned int size)
+{
+ void *shadow;
+ enum kmemcheck_shadow status;
+
+ shadow = kmemcheck_shadow_lookup(addr);
+ if (!shadow)
+ return;
+
+ kmemcheck_save_addr(addr);
+ status = kmemcheck_shadow_test(shadow, size);
+ if (status == KMEMCHECK_SHADOW_INITIALIZED)
+ return;
+
+ if (kmemcheck_enabled)
+ kmemcheck_error_save(status, addr, size, regs);
+
+ if (kmemcheck_enabled == 2)
+ kmemcheck_enabled = 0;
+
+ /* Don't warn about it again. */
+ kmemcheck_shadow_set(shadow, size);
+}
+
+/* Access may cross page boundary */
+static void kmemcheck_read(struct pt_regs *regs,
+ unsigned long addr, unsigned int size)
+{
+ unsigned long page = addr & PAGE_MASK;
+ unsigned long next_addr = addr + size - 1;
+ unsigned long next_page = next_addr & PAGE_MASK;
+
+ if (likely(page == next_page)) {
+ kmemcheck_read_strict(regs, addr, size);
+ return;
+ }
+
+ /*
+ * What we do is basically to split the access across the
+ * two pages and handle each part separately. Yes, this means
+ * that we may now see reads that are 3 + 5 bytes, for
+ * example (and if both are uninitialized, there will be two
+ * reports), but it makes the code a lot simpler.
+ */
+ kmemcheck_read_strict(regs, addr, next_page - addr);
+ kmemcheck_read_strict(regs, next_page, next_addr - next_page);
+}
+
+static void kmemcheck_write_strict(struct pt_regs *regs,
+ unsigned long addr, unsigned int size)
+{
+ void *shadow;
+
+ shadow = kmemcheck_shadow_lookup(addr);
+ if (!shadow)
+ return;
+
+ kmemcheck_save_addr(addr);
+ kmemcheck_shadow_set(shadow, size);
+}
+
+static void kmemcheck_write(struct pt_regs *regs,
+ unsigned long addr, unsigned int size)
+{
+ unsigned long page = addr & PAGE_MASK;
+ unsigned long next_addr = addr + size - 1;
+ unsigned long next_page = next_addr & PAGE_MASK;
+
+ if (likely(page == next_page)) {
+ kmemcheck_write_strict(regs, addr, size);
+ return;
+ }
+
+ /* See comment in kmemcheck_read(). */
+ kmemcheck_write_strict(regs, addr, next_page - addr);
+ kmemcheck_write_strict(regs, next_page, next_addr - next_page);
+}
+
+/*
+ * Copying is hard. We have two addresses, each of which may be split across
+ * a page (and each page will have different shadow addresses).
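+ *
+ * The shadow of the source is first gathered into a local buffer and
+ * propagated to the destination's shadow (where the destination is
+ * tracked); whatever could not be propagated is then tested and, if
+ * uninitialized, reported against the source address.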
+ */
+static void kmemcheck_copy(struct pt_regs *regs,
+ unsigned long src_addr, unsigned long dst_addr, unsigned int size)
+{
+ uint8_t shadow[8];
+ enum kmemcheck_shadow status;
+
+ unsigned long page;
+ unsigned long next_addr;
+ unsigned long next_page;
+
+ uint8_t *x;
+ unsigned int i;
+ unsigned int n;
+
+ BUG_ON(size > sizeof(shadow));
+
+ page = src_addr & PAGE_MASK;
+ next_addr = src_addr + size - 1;
+ next_page = next_addr & PAGE_MASK;
+
+ if (likely(page == next_page)) {
+ /* Same page */
+ x = kmemcheck_shadow_lookup(src_addr);
+ if (x) {
+ kmemcheck_save_addr(src_addr);
+ for (i = 0; i < size; ++i)
+ shadow[i] = x[i];
+ } else {
+ for (i = 0; i < size; ++i)
+ shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+ }
+ } else {
+ n = next_page - src_addr;
+ BUG_ON(n > sizeof(shadow));
+
+ /* First page */
+ x = kmemcheck_shadow_lookup(src_addr);
+ if (x) {
+ kmemcheck_save_addr(src_addr);
+ for (i = 0; i < n; ++i)
+ shadow[i] = x[i];
+ } else {
+ /* Not tracked */
+ for (i = 0; i < n; ++i)
+ shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+ }
+
+ /* Second page */
+ x = kmemcheck_shadow_lookup(next_page);
+ if (x) {
+ kmemcheck_save_addr(next_page);
+ for (i = n; i < size; ++i)
+ shadow[i] = x[i - n];
+ } else {
+ /* Not tracked */
+ for (i = n; i < size; ++i)
+ shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+ }
+ }
+
+ page = dst_addr & PAGE_MASK;
+ next_addr = dst_addr + size - 1;
+ next_page = next_addr & PAGE_MASK;
+
+ if (likely(page == next_page)) {
+ /* Same page */
+ x = kmemcheck_shadow_lookup(dst_addr);
+ if (x) {
+ kmemcheck_save_addr(dst_addr);
+ for (i = 0; i < size; ++i) {
+ x[i] = shadow[i];
+ shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+ }
+ }
+ } else {
+ n = next_page - dst_addr;
+ BUG_ON(n > sizeof(shadow));
+
+ /* First page */
+ x = kmemcheck_shadow_lookup(dst_addr);
+ if (x) {
+ kmemcheck_save_addr(dst_addr);
+ for (i = 0; i < n; ++i) {
+ x[i] = shadow[i];
+ shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+ }
+ }
+
+ /* Second page */
+ x = kmemcheck_shadow_lookup(next_page);
+ if (x) {
+ kmemcheck_save_addr(next_page);
+ for (i = n; i < size; ++i) {
+ x[i - n] = shadow[i];
+ shadow[i] = KMEMCHECK_SHADOW_INITIALIZED;
+ }
+ }
+ }
+
+ status = kmemcheck_shadow_test(shadow, size);
+ if (status == KMEMCHECK_SHADOW_INITIALIZED)
+ return;
+
+ if (kmemcheck_enabled)
+ kmemcheck_error_save(status, src_addr, size, regs);
+
+ if (kmemcheck_enabled == 2)
+ kmemcheck_enabled = 0;
+}
+
+enum kmemcheck_method {
+ KMEMCHECK_READ,
+ KMEMCHECK_WRITE,
+};
+
+static void kmemcheck_access(struct pt_regs *regs,
+ unsigned long fallback_address, enum kmemcheck_method fallback_method)
+{
+ const uint8_t *insn;
+ const uint8_t *insn_primary;
+ unsigned int size;
+
+ struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+
+ /* Recursive fault -- ouch. */
+ if (data->busy) {
+ kmemcheck_show_addr(fallback_address);
+ kmemcheck_error_save_bug(regs);
+ return;
+ }
+
+ data->busy = true;
+
+ insn = (const uint8_t *) regs->ip;
+ insn_primary = kmemcheck_opcode_get_primary(insn);
+
+ kmemcheck_opcode_decode(insn, &size);
+
+ switch (insn_primary[0]) {
+#ifdef CONFIG_KMEMCHECK_BITOPS_OK
+ /* AND, OR, XOR */
+ /*
+ * Unfortunately, these instructions have to be excluded from
+ * our regular checking since they access only some (and not
+ * all) bits. This clears out "bogus" bitfield-access warnings.
+ */
+ case 0x80:
+ case 0x81:
+ case 0x82:
+ case 0x83:
+ switch ((insn_primary[1] >> 3) & 7) {
+ /* OR */
+ case 1:
+ /* AND */
+ case 4:
+ /* XOR */
+ case 6:
+ kmemcheck_write(regs, fallback_address, size);
+ goto out;
+
+ /* ADD */
+ case 0:
+ /* ADC */
+ case 2:
+ /* SBB */
+ case 3:
+ /* SUB */
+ case 5:
+ /* CMP */
+ case 7:
+ break;
+ }
+ break;
+#endif
+
+ /* MOVS, MOVSB, MOVSW, MOVSD */
+ case 0xa4:
+ case 0xa5:
+ /*
+ * These instructions are special because they take two
+ * addresses, but we only get one page fault.
+ */
+ kmemcheck_copy(regs, regs->si, regs->di, size);
+ goto out;
+
+ /* CMPS, CMPSB, CMPSW, CMPSD */
+ case 0xa6:
+ case 0xa7:
+ kmemcheck_read(regs, regs->si, size);
+ kmemcheck_read(regs, regs->di, size);
+ goto out;
+ }
+
+ /*
+ * If the opcode isn't special in any way, we use the data from the
+ * page fault handler to determine the address and type of memory
+ * access.
+ */
+ switch (fallback_method) {
+ case KMEMCHECK_READ:
+ kmemcheck_read(regs, fallback_address, size);
+ goto out;
+ case KMEMCHECK_WRITE:
+ kmemcheck_write(regs, fallback_address, size);
+ goto out;
+ }
+
+out:
+ data->busy = false;
+}
+
+bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code)
+{
+ pte_t *pte;
+
+ /*
+ * XXX: Is it safe to assume that memory accesses from virtual 86
+ * mode or non-kernel code segments will _never_ access kernel
+ * memory (e.g. tracked pages)? For now, we need this to avoid
+ * invoking kmemcheck for PnP BIOS calls.
+ */
+ if (regs->flags & X86_VM_MASK)
+ return false;
+ if (regs->cs != __KERNEL_CS)
+ return false;
+
+ pte = kmemcheck_pte_lookup(address);
+ if (!pte)
+ return false;
+
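+ /* Bit 1 of the page fault error code is set for write accesses. */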
+ if (error_code & 2)
+ kmemcheck_access(regs, address, KMEMCHECK_WRITE);
+ else
+ kmemcheck_access(regs, address, KMEMCHECK_READ);
+
+ kmemcheck_show(regs);
+ return true;
+}
+
+bool kmemcheck_trap(struct pt_regs *regs)
+{
+ if (!kmemcheck_active(regs))
+ return false;
+
+ /* We're done. */
+ kmemcheck_hide(regs);
+ return true;
+}
diff --git a/arch/x86/mm/kmemcheck/opcode.c b/arch/x86/mm/kmemcheck/opcode.c
new file mode 100644
index 000000000000..63c19e27aa6f
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/opcode.c
@@ -0,0 +1,106 @@
+#include <linux/types.h>
+
+#include "opcode.h"
+
+static bool opcode_is_prefix(uint8_t b)
+{
+ return
+ /* Group 1 */
+ b == 0xf0 || b == 0xf2 || b == 0xf3
+ /* Group 2 */
+ || b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26
+ || b == 0x64 || b == 0x65
+ /* Group 3 */
+ || b == 0x66
+ /* Group 4 */
+ || b == 0x67;
+}
+
+#ifdef CONFIG_X86_64
+static bool opcode_is_rex_prefix(uint8_t b)
+{
+ return (b & 0xf0) == 0x40;
+}
+#else
+static bool opcode_is_rex_prefix(uint8_t b)
+{
+ return false;
+}
+#endif
+
+#define REX_W (1 << 3)
+
+/*
+ * This is a VERY crude opcode decoder. We only need to find the size of the
+ * load/store that caused our #PF and this should work for all the opcodes
+ * that we care about. Moreover, the ones who invented this instruction set
+ * should be shot.
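+ *
+ * For example, "\x66\x8b\x07" (mov (%rdi),%ax on x86-64) decodes as
+ * follows: the 0x66 prefix selects a 16-bit operand and opcode 0x8b
+ * has its low bit set, so the decoded access size is 2 bytes.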
+ */
+void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size)
+{
+ /* Default operand size */
+ int operand_size_override = 4;
+
+ /* prefixes */
+ for (; opcode_is_prefix(*op); ++op) {
+ if (*op == 0x66)
+ operand_size_override = 2;
+ }
+
+ /* REX prefix */
+ if (opcode_is_rex_prefix(*op)) {
+ uint8_t rex = *op;
+
+ ++op;
+ if (rex & REX_W) {
+ switch (*op) {
+ case 0x63:
+ *size = 4;
+ return;
+ case 0x0f:
+ ++op;
+
+ switch (*op) {
+ case 0xb6:
+ case 0xbe:
+ *size = 1;
+ return;
+ case 0xb7:
+ case 0xbf:
+ *size = 2;
+ return;
+ }
+
+ break;
+ }
+
+ *size = 8;
+ return;
+ }
+ }
+
+ /* escape opcode */
+ if (*op == 0x0f) {
+ ++op;
+
+ /*
+ * 0xb7/0xbf are move with zero-extend and sign-extend from a
+ * 16-bit operand, respectively. The 8-bit variants (0xb6/0xbe)
+ * need no special handling here: their low opcode bit is clear,
+ * so the size computation below already yields 1 for them.
+ */
+ if (*op == 0xb7 || *op == 0xbf)
+ operand_size_override = 2;
+ }
+
+ *size = (*op & 1) ? operand_size_override : 1;
+}
+
+const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op)
+{
+ /* skip prefixes */
+ while (opcode_is_prefix(*op))
+ ++op;
+ if (opcode_is_rex_prefix(*op))
+ ++op;
+ return op;
+}
diff --git a/arch/x86/mm/kmemcheck/opcode.h b/arch/x86/mm/kmemcheck/opcode.h
new file mode 100644
index 000000000000..6956aad66b5b
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/opcode.h
@@ -0,0 +1,9 @@
+#ifndef ARCH__X86__MM__KMEMCHECK__OPCODE_H
+#define ARCH__X86__MM__KMEMCHECK__OPCODE_H
+
+#include <linux/types.h>
+
+void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size);
+const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op);
+
+#endif
diff --git a/arch/x86/mm/kmemcheck/pte.c b/arch/x86/mm/kmemcheck/pte.c
new file mode 100644
index 000000000000..4ead26eeaf96
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/pte.c
@@ -0,0 +1,22 @@
+#include <linux/mm.h>
+
+#include <asm/pgtable.h>
+
+#include "pte.h"
+
+pte_t *kmemcheck_pte_lookup(unsigned long address)
+{
+ pte_t *pte;
+ unsigned int level;
+
+ pte = lookup_address(address, &level);
+ if (!pte)
+ return NULL;
+ if (level != PG_LEVEL_4K)
+ return NULL;
+ if (!pte_hidden(*pte))
+ return NULL;
+
+ return pte;
+}
+
diff --git a/arch/x86/mm/kmemcheck/pte.h b/arch/x86/mm/kmemcheck/pte.h
new file mode 100644
index 000000000000..9f5966456492
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/pte.h
@@ -0,0 +1,10 @@
+#ifndef ARCH__X86__MM__KMEMCHECK__PTE_H
+#define ARCH__X86__MM__KMEMCHECK__PTE_H
+
+#include <linux/mm.h>
+
+#include <asm/pgtable.h>
+
+pte_t *kmemcheck_pte_lookup(unsigned long address);
+
+#endif
diff --git a/arch/x86/mm/kmemcheck/selftest.c b/arch/x86/mm/kmemcheck/selftest.c
new file mode 100644
index 000000000000..036efbea8b28
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/selftest.c
@@ -0,0 +1,69 @@
+#include <linux/kernel.h>
+
+#include "opcode.h"
+#include "selftest.h"
+
+struct selftest_opcode {
+ unsigned int expected_size;
+ const uint8_t *insn;
+ const char *desc;
+};
+
+static const struct selftest_opcode selftest_opcodes[] = {
+ /* REP MOVS */
+ {1, "\xf3\xa4", "rep movsb <mem8>, <mem8>"},
+ {4, "\xf3\xa5", "rep movsl <mem32>, <mem32>"},
+
+ /* MOVZX */
+ {1, "\x66\x0f\xb6\x51\xf8", "movzbw <mem8>, <reg16>"},
+ {1, "\x0f\xb6\x51\xf8", "movzbl <mem8>, <reg32>"},
+
+ /* MOVSX */
+ {1, "\x66\x0f\xbe\x51\xf8", "movsbw <mem8>, <reg16>"},
+ {1, "\x0f\xbe\x51\xf8", "movsbl <mem8>, <reg32>"},
+
+#ifdef CONFIG_X86_64
+ /* MOVZX */
+ {1, "\x49\x0f\xb6\x51\xf8", "movzbq <mem8>, <reg64>"},
+ {2, "\x49\x0f\xb7\x51\xf8", "movzwq <mem16>, <reg64>"},
+
+ /* MOVSX / MOVSXD */
+ {1, "\x49\x0f\xbe\x51\xf8", "movsbq <mem8>, <reg64>"},
+ {2, "\x49\x0f\xbf\x51\xf8", "movswq <mem16>, <reg64>"},
+ {4, "\x49\x63\x51\xf8", "movslq <mem32>, <reg64>"},
+#endif
+};
+
+static bool selftest_opcode_one(const struct selftest_opcode *op)
+{
+ unsigned size;
+
+ kmemcheck_opcode_decode(op->insn, &size);
+
+ if (size == op->expected_size)
+ return true;
+
+ printk(KERN_WARNING "kmemcheck: opcode %s: expected size %d, got %d\n",
+ op->desc, op->expected_size, size);
+ return false;
+}
+
+static bool selftest_opcodes_all(void)
+{
+ bool pass = true;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(selftest_opcodes); ++i)
+ pass = pass && selftest_opcode_one(&selftest_opcodes[i]);
+
+ return pass;
+}
+
+bool kmemcheck_selftest(void)
+{
+ bool pass = true;
+
+ pass = pass && selftest_opcodes_all();
+
+ return pass;
+}
diff --git a/arch/x86/mm/kmemcheck/selftest.h b/arch/x86/mm/kmemcheck/selftest.h
new file mode 100644
index 000000000000..8fed4fe11f95
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/selftest.h
@@ -0,0 +1,6 @@
+#ifndef ARCH_X86_MM_KMEMCHECK_SELFTEST_H
+#define ARCH_X86_MM_KMEMCHECK_SELFTEST_H
+
+bool kmemcheck_selftest(void);
+
+#endif
diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c
new file mode 100644
index 000000000000..e773b6bd0079
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/shadow.c
@@ -0,0 +1,162 @@
+#include <linux/kmemcheck.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#include "pte.h"
+#include "shadow.h"
+
+/*
+ * Return the shadow address for the given address. Returns NULL if the
+ * address is not tracked.
+ *
+ * We need to be extremely careful not to follow any invalid pointers,
+ * because this function can be called for *any* possible address.
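+ *
+ * virt_addr_valid() filters out vmalloc/ioremap and other non-lowmem
+ * addresses before virt_to_page() is used on them.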
+ */
+void *kmemcheck_shadow_lookup(unsigned long address)
+{
+ pte_t *pte;
+ struct page *page;
+
+ if (!virt_addr_valid(address))
+ return NULL;
+
+ pte = kmemcheck_pte_lookup(address);
+ if (!pte)
+ return NULL;
+
+ page = virt_to_page(address);
+ if (!page->shadow)
+ return NULL;
+ return page->shadow + (address & (PAGE_SIZE - 1));
+}
+
+static void mark_shadow(void *address, unsigned int n,
+ enum kmemcheck_shadow status)
+{
+ unsigned long addr = (unsigned long) address;
+ unsigned long last_addr = addr + n - 1;
+ unsigned long page = addr & PAGE_MASK;
+ unsigned long last_page = last_addr & PAGE_MASK;
+ unsigned int first_n;
+ void *shadow;
+
+ /* If the memory range crosses a page boundary, stop there. */
+ if (page == last_page)
+ first_n = n;
+ else
+ first_n = page + PAGE_SIZE - addr;
+
+ shadow = kmemcheck_shadow_lookup(addr);
+ if (shadow)
+ memset(shadow, status, first_n);
+
+ addr += first_n;
+ n -= first_n;
+
+ /* Do full-page memset()s. */
+ while (n >= PAGE_SIZE) {
+ shadow = kmemcheck_shadow_lookup(addr);
+ if (shadow)
+ memset(shadow, status, PAGE_SIZE);
+
+ addr += PAGE_SIZE;
+ n -= PAGE_SIZE;
+ }
+
+ /* Do the remaining page, if any. */
+ if (n > 0) {
+ shadow = kmemcheck_shadow_lookup(addr);
+ if (shadow)
+ memset(shadow, status, n);
+ }
+}
+
+void kmemcheck_mark_unallocated(void *address, unsigned int n)
+{
+ mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED);
+}
+
+void kmemcheck_mark_uninitialized(void *address, unsigned int n)
+{
+ mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED);
+}
+
+/*
+ * Fill the shadow memory of the given address such that the memory at that
+ * address is marked as being initialized.
+ */
+void kmemcheck_mark_initialized(void *address, unsigned int n)
+{
+ mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED);
+}
+EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized);
+
+void kmemcheck_mark_freed(void *address, unsigned int n)
+{
+ mark_shadow(address, n, KMEMCHECK_SHADOW_FREED);
+}
+
+void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; ++i)
+ kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE);
+}
+
+void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; ++i)
+ kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);
+}
+
+void kmemcheck_mark_initialized_pages(struct page *p, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 0; i < n; ++i)
+ kmemcheck_mark_initialized(page_address(&p[i]), PAGE_SIZE);
+}
+
+enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
+{
+ uint8_t *x;
+ unsigned int i;
+
+ x = shadow;
+
+#ifdef CONFIG_KMEMCHECK_PARTIAL_OK
+ /*
+ * Make sure _some_ bytes are initialized. Gcc frequently generates
+ * code to access neighboring bytes.
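+ * E.g. a 32-bit load that gcc emits for an initialized 2-byte struct
+ * member also reads two adjacent, possibly uninitialized, bytes;
+ * with PARTIAL_OK such an access is not reported.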
+ */
+ for (i = 0; i < size; ++i) {
+ if (x[i] == KMEMCHECK_SHADOW_INITIALIZED)
+ return x[i];
+ }
+#else
+ /* All bytes must be initialized. */
+ for (i = 0; i < size; ++i) {
+ if (x[i] != KMEMCHECK_SHADOW_INITIALIZED)
+ return x[i];
+ }
+#endif
+
+ return x[0];
+}
+
+void kmemcheck_shadow_set(void *shadow, unsigned int size)
+{
+ uint8_t *x;
+ unsigned int i;
+
+ x = shadow;
+ for (i = 0; i < size; ++i)
+ x[i] = KMEMCHECK_SHADOW_INITIALIZED;
+}
diff --git a/arch/x86/mm/kmemcheck/shadow.h b/arch/x86/mm/kmemcheck/shadow.h
new file mode 100644
index 000000000000..af46d9ab9d86
--- /dev/null
+++ b/arch/x86/mm/kmemcheck/shadow.h
@@ -0,0 +1,16 @@
+#ifndef ARCH__X86__MM__KMEMCHECK__SHADOW_H
+#define ARCH__X86__MM__KMEMCHECK__SHADOW_H
+
+enum kmemcheck_shadow {
+ KMEMCHECK_SHADOW_UNALLOCATED,
+ KMEMCHECK_SHADOW_UNINITIALIZED,
+ KMEMCHECK_SHADOW_INITIALIZED,
+ KMEMCHECK_SHADOW_FREED,
+};
+
+void *kmemcheck_shadow_lookup(unsigned long address);
+
+enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size);
+void kmemcheck_shadow_set(void *shadow, unsigned int size);
+
+#endif
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 6ce9518fe2ac..3cfe9ced8a4c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -470,7 +470,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
if (!debug_pagealloc)
spin_unlock(&cpa_lock);
- base = alloc_pages(GFP_KERNEL, 0);
+ base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
if (!debug_pagealloc)
spin_lock(&cpa_lock);
if (!base)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 7aa03a5389f5..8e43bdd45456 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -4,9 +4,11 @@
#include <asm/tlb.h>
#include <asm/fixmap.h>
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+ return (pte_t *)__get_free_page(PGALLOC_GFP);
}
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
@@ -14,9 +16,9 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
struct page *pte;
#ifdef CONFIG_HIGHPTE
- pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
+ pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0);
#else
- pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+ pte = alloc_pages(PGALLOC_GFP, 0);
#endif
if (pte)
pgtable_page_ctor(pte);
@@ -161,7 +163,7 @@ static int preallocate_pmds(pmd_t *pmds[])
bool failed = false;
for(i = 0; i < PREALLOCATED_PMDS; i++) {
- pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+ pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
if (pmd == NULL)
failed = true;
pmds[i] = pmd;
@@ -228,7 +230,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
pmd_t *pmds[PREALLOCATED_PMDS];
unsigned long flags;
- pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
if (pgd == NULL)
goto out;
diff --git a/arch/xtensa/include/asm/kmap_types.h b/arch/xtensa/include/asm/kmap_types.h
index 9e822d2e3bce..11c687e527f1 100644
--- a/arch/xtensa/include/asm/kmap_types.h
+++ b/arch/xtensa/include/asm/kmap_types.h
@@ -1,31 +1,6 @@
-/*
- * include/asm-xtensa/kmap_types.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
#ifndef _XTENSA_KMAP_TYPES_H
#define _XTENSA_KMAP_TYPES_H
-enum km_type {
- KM_BOUNCE_READ,
- KM_SKB_SUNRPC_DATA,
- KM_SKB_DATA_SOFTIRQ,
- KM_USER0,
- KM_USER1,
- KM_BIO_SRC_IRQ,
- KM_BIO_DST_IRQ,
- KM_PTE0,
- KM_PTE1,
- KM_IRQ0,
- KM_IRQ1,
- KM_SOFTIRQ0,
- KM_SOFTIRQ1,
- KM_TYPE_NR
-};
+#include <asm-generic/kmap_types.h>
#endif /* _XTENSA_KMAP_TYPES_H */
diff --git a/arch/xtensa/kernel/init_task.c b/arch/xtensa/kernel/init_task.c
index e07f5c9fcd35..c4302f0e4ba0 100644
--- a/arch/xtensa/kernel/init_task.c
+++ b/arch/xtensa/kernel/init_task.c
@@ -23,10 +23,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
union thread_union init_thread_union
__attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };