Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Makefile | 5
-rw-r--r--  arch/arm/boot/Makefile | 5
-rw-r--r--  arch/arm/boot/bootp/Makefile | 5
-rw-r--r--  arch/arm/mach-pxa/leds-mainstone.c | 6
-rw-r--r--  arch/arm/mach-s3c2410/cpu.c | 2
-rw-r--r--  arch/arm/mach-sa1100/collie.c | 30
-rw-r--r--  arch/arm26/Makefile | 7
-rw-r--r--  arch/arm26/boot/Makefile | 5
-rw-r--r--  arch/cris/arch-v32/drivers/cryptocop.c | 2
-rw-r--r--  arch/cris/kernel/process.c | 3
-rw-r--r--  arch/frv/kernel/gdb-stub.c | 2
-rw-r--r--  arch/h8300/kernel/process.c | 4
-rw-r--r--  arch/i386/Kconfig | 2
-rw-r--r--  arch/i386/Kconfig.debug | 4
-rw-r--r--  arch/i386/Makefile | 7
-rw-r--r--  arch/i386/Makefile.cpu | 4
-rw-r--r--  arch/i386/boot/edd.S | 2
-rw-r--r--  arch/i386/kernel/apm.c | 2
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Kconfig | 24
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c | 64
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/elanfreq.c | 109
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/gx-suspmod.c | 183
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.h | 4
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/p4-clockmod.c | 26
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k6.c | 16
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k7.c | 10
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c | 39
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.h | 6
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c | 4
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-lib.c | 42
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-lib.h | 20
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-smi.c | 53
-rw-r--r--  arch/i386/kernel/cpu/proc.c | 2
-rw-r--r--  arch/i386/kernel/dmi_scan.c | 33
-rw-r--r--  arch/i386/kernel/microcode.c | 4
-rw-r--r--  arch/i386/kernel/smpboot.c | 33
-rw-r--r--  arch/i386/kernel/vmlinux.lds.S | 3
-rw-r--r--  arch/i386/mach-visws/reboot.c | 1
-rw-r--r--  arch/ia64/Kconfig | 9
-rw-r--r--  arch/ia64/Makefile | 5
-rw-r--r--  arch/ia64/configs/gensparse_defconfig | 1
-rw-r--r--  arch/ia64/configs/sn2_defconfig | 1
-rw-r--r--  arch/ia64/defconfig | 1
-rw-r--r--  arch/ia64/dig/setup.c | 5
-rw-r--r--  arch/ia64/ia32/sys_ia32.c | 14
-rw-r--r--  arch/ia64/kernel/acpi.c | 35
-rw-r--r--  arch/ia64/kernel/ivt.S | 1
-rw-r--r--  arch/ia64/kernel/machvec.c | 19
-rw-r--r--  arch/ia64/kernel/mca.c | 110
-rw-r--r--  arch/ia64/kernel/mca_drv.c | 22
-rw-r--r--  arch/ia64/kernel/mca_drv.h | 7
-rw-r--r--  arch/ia64/kernel/mca_drv_asm.S | 13
-rw-r--r--  arch/ia64/kernel/numa.c | 2
-rw-r--r--  arch/ia64/kernel/patch.c | 8
-rw-r--r--  arch/ia64/kernel/setup.c | 61
-rw-r--r--  arch/ia64/kernel/smpboot.c | 109
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S | 60
-rw-r--r--  arch/ia64/mm/contig.c | 8
-rw-r--r--  arch/ia64/mm/discontig.c | 2
-rw-r--r--  arch/ia64/mm/hugetlbpage.c | 7
-rw-r--r--  arch/ia64/mm/init.c | 16
-rw-r--r--  arch/ia64/sn/kernel/bte.c | 2
-rw-r--r--  arch/ia64/sn/kernel/io_init.c | 29
-rw-r--r--  arch/ia64/sn/kernel/irq.c | 21
-rw-r--r--  arch/ia64/sn/kernel/tiocx.c | 10
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c | 17
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c | 2
-rw-r--r--  arch/m32r/Kconfig.debug | 2
-rw-r--r--  arch/m32r/Makefile | 5
-rw-r--r--  arch/m68k/kernel/process.c | 2
-rw-r--r--  arch/m68knommu/kernel/process.c | 2
-rw-r--r--  arch/mips/kernel/sysirix.c | 22
-rw-r--r--  arch/mips/mm/dma-ip32.c | 6
-rw-r--r--  arch/parisc/kernel/process.c | 5
-rw-r--r--  arch/powerpc/Makefile | 2
-rw-r--r--  arch/ppc/8xx_io/cs4218_tdm.c | 10
-rw-r--r--  arch/ppc/Makefile | 2
-rw-r--r--  arch/ppc/boot/Makefile | 8
-rw-r--r--  arch/ppc/boot/openfirmware/Makefile | 7
-rw-r--r--  arch/ppc/syslib/ppc85xx_setup.c | 2
-rw-r--r--  arch/s390/Kconfig | 2
-rw-r--r--  arch/s390/appldata/appldata_base.c | 3
-rw-r--r--  arch/s390/kernel/debug.c | 11
-rw-r--r--  arch/s390/kernel/process.c | 2
-rw-r--r--  arch/s390/kernel/setup.c | 108
-rw-r--r--  arch/s390/kernel/smp.c | 4
-rw-r--r--  arch/s390/mm/cmm.c | 6
-rw-r--r--  arch/sh/Makefile | 2
-rw-r--r--  arch/sh/kernel/process.c | 1
-rw-r--r--  arch/sparc/Kconfig | 1
-rw-r--r--  arch/sparc/kernel/irq.c | 66
-rw-r--r--  arch/sparc/kernel/smp.c | 84
-rw-r--r--  arch/sparc/kernel/sparc_ksyms.c | 4
-rw-r--r--  arch/sparc/kernel/sun4d_irq.c | 2
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c | 8
-rw-r--r--  arch/sparc/kernel/sun4m_smp.c | 181
-rw-r--r--  arch/sparc/mm/srmmu.c | 6
-rw-r--r--  arch/sparc64/Kconfig.debug | 2
-rw-r--r--  arch/sparc64/kernel/irq.c | 2
-rw-r--r--  arch/um/Makefile | 7
-rw-r--r--  arch/v850/kernel/process.c | 2
-rw-r--r--  arch/x86_64/Kconfig | 40
-rw-r--r--  arch/x86_64/Makefile | 6
-rw-r--r--  arch/x86_64/defconfig | 34
-rw-r--r--  arch/x86_64/ia32/ia32_binfmt.c | 2
-rw-r--r--  arch/x86_64/ia32/sys_ia32.c | 16
-rw-r--r--  arch/x86_64/kernel/aperture.c | 4
-rw-r--r--  arch/x86_64/kernel/apic.c | 20
-rw-r--r--  arch/x86_64/kernel/early_printk.c | 90
-rw-r--r--  arch/x86_64/kernel/entry.S | 2
-rw-r--r--  arch/x86_64/kernel/functionlist | 1286
-rw-r--r--  arch/x86_64/kernel/head.S | 26
-rw-r--r--  arch/x86_64/kernel/io_apic.c | 10
-rw-r--r--  arch/x86_64/kernel/mce.c | 3
-rw-r--r--  arch/x86_64/kernel/mpparse.c | 19
-rw-r--r--  arch/x86_64/kernel/nmi.c | 1
-rw-r--r--  arch/x86_64/kernel/pci-dma.c | 3
-rw-r--r--  arch/x86_64/kernel/pci-gart.c | 11
-rw-r--r--  arch/x86_64/kernel/pmtimer.c | 3
-rw-r--r--  arch/x86_64/kernel/process.c | 11
-rw-r--r--  arch/x86_64/kernel/ptrace.c | 6
-rw-r--r--  arch/x86_64/kernel/setup.c | 85
-rw-r--r--  arch/x86_64/kernel/setup64.c | 18
-rw-r--r--  arch/x86_64/kernel/smp.c | 6
-rw-r--r--  arch/x86_64/kernel/time.c | 129
-rw-r--r--  arch/x86_64/kernel/traps.c | 21
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S | 6
-rw-r--r--  arch/x86_64/kernel/x8664_ksyms.c | 4
-rw-r--r--  arch/x86_64/lib/thunk.S | 1
-rw-r--r--  arch/x86_64/mm/fault.c | 79
-rw-r--r--  arch/x86_64/mm/init.c | 36
-rw-r--r--  arch/x86_64/mm/k8topology.c | 2
-rw-r--r--  arch/x86_64/mm/numa.c | 24
-rw-r--r--  arch/x86_64/mm/srat.c | 8
-rw-r--r--  arch/x86_64/pci/mmconfig.c | 18
135 files changed, 2715 insertions(+), 1264 deletions(-)
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index b5b1e4087516..99c0d323719a 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -1,6 +1,9 @@
#
# arch/arm/Makefile
#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
@@ -177,7 +180,7 @@ endif
archprepare: maketools
-.PHONY: maketools FORCE
+PHONY += maketools FORCE
maketools: include/linux/version.h include/asm-arm/.arch FORCE
$(Q)$(MAKE) $(build)=arch/arm/tools include/asm-arm/mach-types.h
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index a174d63395ea..ec9c400c7f82 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -1,6 +1,9 @@
#
# arch/arm/boot/Makefile
#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
@@ -73,7 +76,7 @@ $(obj)/bootpImage: $(obj)/bootp/bootp FORCE
$(call if_changed,objcopy)
@echo ' Kernel: $@ is ready'
-.PHONY: initrd FORCE
+PHONY += initrd FORCE
initrd:
@test "$(INITRD_PHYS)" != "" || \
(echo This machine does not support INITRD; exit -1)
diff --git a/arch/arm/boot/bootp/Makefile b/arch/arm/boot/bootp/Makefile
index 8e8879b6b3d7..c394e305447c 100644
--- a/arch/arm/boot/bootp/Makefile
+++ b/arch/arm/boot/bootp/Makefile
@@ -1,6 +1,9 @@
#
# linux/arch/arm/boot/bootp/Makefile
#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
LDFLAGS_bootp :=-p --no-undefined -X \
--defsym initrd_phys=$(INITRD_PHYS) \
@@ -21,4 +24,4 @@ $(obj)/kernel.o: arch/arm/boot/zImage FORCE
$(obj)/initrd.o: $(INITRD) FORCE
-.PHONY: $(INITRD) FORCE
+PHONY += $(INITRD) FORCE
diff --git a/arch/arm/mach-pxa/leds-mainstone.c b/arch/arm/mach-pxa/leds-mainstone.c
index bbd3f87a9fc2..c06d3d7a8dd4 100644
--- a/arch/arm/mach-pxa/leds-mainstone.c
+++ b/arch/arm/mach-pxa/leds-mainstone.c
@@ -85,7 +85,7 @@ void mainstone_leds_event(led_event_t evt)
break;
case led_green_on:
- hw_led_state |= D21;;
+ hw_led_state |= D21;
break;
case led_green_off:
@@ -93,7 +93,7 @@ void mainstone_leds_event(led_event_t evt)
break;
case led_amber_on:
- hw_led_state |= D22;;
+ hw_led_state |= D22;
break;
case led_amber_off:
@@ -101,7 +101,7 @@ void mainstone_leds_event(led_event_t evt)
break;
case led_red_on:
- hw_led_state |= D23;;
+ hw_led_state |= D23;
break;
case led_red_off:
diff --git a/arch/arm/mach-s3c2410/cpu.c b/arch/arm/mach-s3c2410/cpu.c
index 00a379334b60..70c34fcf7858 100644
--- a/arch/arm/mach-s3c2410/cpu.c
+++ b/arch/arm/mach-s3c2410/cpu.c
@@ -146,7 +146,7 @@ void s3c24xx_set_board(struct s3c24xx_board *b)
board = b;
if (b->clocks_count != 0) {
- struct clk **ptr = b->clocks;;
+ struct clk **ptr = b->clocks;
for (i = b->clocks_count; i > 0; i--, ptr++)
s3c24xx_register_clock(*ptr);
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index 6888816a1935..102454082474 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -40,6 +40,7 @@
#include <asm/hardware/scoop.h>
#include <asm/mach/sharpsl_param.h>
#include <asm/hardware/locomo.h>
+#include <asm/arch/mcp.h>
#include "generic.h"
@@ -66,6 +67,32 @@ struct platform_device colliescoop_device = {
.resource = collie_scoop_resources,
};
+static struct scoop_pcmcia_dev collie_pcmcia_scoop[] = {
+{
+ .dev = &colliescoop_device.dev,
+ .irq = COLLIE_IRQ_GPIO_CF_IRQ,
+ .cd_irq = COLLIE_IRQ_GPIO_CF_CD,
+ .cd_irq_str = "PCMCIA0 CD",
+},
+};
+
+static struct scoop_pcmcia_config collie_pcmcia_config = {
+ .devs = &collie_pcmcia_scoop[0],
+ .num_devs = 1,
+};
+
+
+static struct mcp_plat_data collie_mcp_data = {
+ .mccr0 = MCCR0_ADM,
+ .sclk_rate = 11981000,
+};
+
+
+static struct sa1100_port_fns collie_port_fns __initdata = {
+ .set_mctrl = collie_uart_set_mctrl,
+ .get_mctrl = collie_uart_get_mctrl,
+};
+
static struct resource locomo_resources[] = {
[0] = {
@@ -159,6 +186,8 @@ static void __init collie_init(void)
GPDR |= GPIO_32_768kHz;
TUCR = TUCR_32_768kHz;
+ platform_scoop_config = &collie_pcmcia_config;
+
ret = platform_add_devices(devices, ARRAY_SIZE(devices));
if (ret) {
printk(KERN_WARNING "collie: Unable to register LoCoMo device\n");
@@ -166,6 +195,7 @@ static void __init collie_init(void)
sa11x0_set_flash_data(&collie_flash_data, collie_flash_resources,
ARRAY_SIZE(collie_flash_resources));
+ sa11x0_set_mcp_data(&collie_mcp_data);
sharpsl_save_param();
}
diff --git a/arch/arm26/Makefile b/arch/arm26/Makefile
index 844a9e46886e..fe91eda98a94 100644
--- a/arch/arm26/Makefile
+++ b/arch/arm26/Makefile
@@ -1,6 +1,9 @@
#
# arch/arm26/Makefile
#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
@@ -49,9 +52,9 @@ all: zImage
boot := arch/arm26/boot
-.PHONY: maketools FORCE
+PHONY += maketools FORCE
maketools: FORCE
-
+
# Convert bzImage to zImage
bzImage: vmlinux
diff --git a/arch/arm26/boot/Makefile b/arch/arm26/boot/Makefile
index b5c2277654d4..68acb7b0d47f 100644
--- a/arch/arm26/boot/Makefile
+++ b/arch/arm26/boot/Makefile
@@ -1,6 +1,9 @@
#
# arch/arm26/boot/Makefile
#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
@@ -60,7 +63,7 @@ $(obj)/xipImage: vmlinux FORCE
@echo ' Kernel: $@ is ready'
endif
-.PHONY: initrd
+PHONY += initrd
initrd:
@test "$(INITRD_PHYS)" != "" || \
(echo This machine does not support INITRD; exit -1)
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index 501fa52d8d3a..c59ee28a35f4 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2944,7 +2944,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
int spdl_err;
/* Mark output pages dirty. */
spdl_err = set_page_dirty_lock(outpages[i]);
- DEBUG(if (spdl_err)printk("cryptocop_ioctl_process: set_page_dirty_lock returned %d\n", spdl_err));
+ DEBUG(if (spdl_err < 0)printk("cryptocop_ioctl_process: set_page_dirty_lock returned %d\n", spdl_err));
}
for (i = 0; i < nooutpages; i++){
put_page(outpages[i]);
diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 4ab3e87115b6..123451c44154 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -116,6 +116,7 @@
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
+#include <asm/system.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs_struct.h>
@@ -194,8 +195,6 @@ EXPORT_SYMBOL(enable_hlt);
*/
void (*pm_idle)(void);
-extern void default_idle(void);
-
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
diff --git a/arch/frv/kernel/gdb-stub.c b/arch/frv/kernel/gdb-stub.c
index 8f860d9c4947..508601fad079 100644
--- a/arch/frv/kernel/gdb-stub.c
+++ b/arch/frv/kernel/gdb-stub.c
@@ -1406,7 +1406,7 @@ void gdbstub(int sigval)
__debug_frame->psr |= PSR_S;
__debug_regs->brr = (__debug_frame->tbr & TBR_TT) << 12;
__debug_regs->brr |= BRR_EB;
- sigval = SIGXCPU;;
+ sigval = SIGXCPU;
}
LEDS(0x5002);
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index dd344f112cfe..16ccddc69c2b 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -54,7 +54,7 @@ asmlinkage void ret_from_fork(void);
* The idle loop on an H8/300..
*/
#if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM)
-void default_idle(void)
+static void default_idle(void)
{
local_irq_disable();
if (!need_resched()) {
@@ -65,7 +65,7 @@ void default_idle(void)
local_irq_enable();
}
#else
-void default_idle(void)
+static void default_idle(void)
{
cpu_relax();
}
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index bfea1bedcbf2..b008fb0cd7b7 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -737,7 +737,7 @@ config PHYSICAL_START
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
- depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER
+ depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER && !X86_PC
---help---
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
diff --git a/arch/i386/Kconfig.debug b/arch/i386/Kconfig.debug
index 00108ba9a78d..6e97df6979e8 100644
--- a/arch/i386/Kconfig.debug
+++ b/arch/i386/Kconfig.debug
@@ -44,8 +44,8 @@ comment "Page alloc debug is incompatible with Software Suspend on i386"
depends on DEBUG_KERNEL && SOFTWARE_SUSPEND
config DEBUG_PAGEALLOC
- bool "Page alloc debugging"
- depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND
+ bool "Debug page memory allocations"
+ depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND && !HUGETLBFS
help
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 36bef6543ac1..c848a5b30391 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -39,6 +39,9 @@ include $(srctree)/arch/i386/Makefile.cpu
cflags-$(CONFIG_REGPARM) += -mregparm=3
+# temporary until string.h is fixed
+cflags-y += -ffreestanding
+
# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
# a lot more stack due to the lack of sharing of stacklots:
CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then echo $(call cc-option,-fno-unit-at-a-time); fi ;)
@@ -99,8 +102,8 @@ AFLAGS += $(mflags-y)
boot := arch/i386/boot
-.PHONY: zImage bzImage compressed zlilo bzlilo \
- zdisk bzdisk fdimage fdimage144 fdimage288 install
+PHONY += zImage bzImage compressed zlilo bzlilo \
+ zdisk bzdisk fdimage fdimage144 fdimage288 install
all: bzImage
diff --git a/arch/i386/Makefile.cpu b/arch/i386/Makefile.cpu
index dcd936ef45db..a11befba26d5 100644
--- a/arch/i386/Makefile.cpu
+++ b/arch/i386/Makefile.cpu
@@ -39,3 +39,7 @@ cflags-$(CONFIG_X86_ELAN) += -march=i486
# Geode GX1 support
cflags-$(CONFIG_MGEODEGX1) += -march=pentium-mmx
+# add at the end to overwrite eventual tuning options from earlier
+# cpu entries
+cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic)
+
diff --git a/arch/i386/boot/edd.S b/arch/i386/boot/edd.S
index d8d69f2b911d..4b84ea216f2b 100644
--- a/arch/i386/boot/edd.S
+++ b/arch/i386/boot/edd.S
@@ -76,6 +76,8 @@ edd_mbr_sig_read:
popw %es
popw %bx
jc edd_mbr_sig_done # on failure, we're done.
+ cmpb $0, %ah # some BIOSes do not set CF
+ jne edd_mbr_sig_done # on failure, we're done.
movl (EDDBUF+EDD_MBR_SIG_OFFSET), %eax # read sig out of the MBR
movl %eax, (%bx) # store success
incb (EDD_MBR_SIG_NR_BUF) # note that we stored something
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 05312a8abb8b..da30a374dd4e 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -824,8 +824,6 @@ static void apm_do_busy(void)
static void (*original_pm_idle)(void);
-extern void default_idle(void);
-
/**
* apm_cpu_idle - cpu idling for APM capable Linux
*
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index 26892d2099b0..e44a4c6a4fe5 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -96,7 +96,6 @@ config X86_POWERNOW_K8_ACPI
config X86_GX_SUSPMOD
tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
- depends on PCI
help
This add the CPUFreq driver for NatSemi Geode processors which
support suspend modulation.
@@ -115,9 +114,9 @@ config X86_SPEEDSTEP_CENTRINO
you also need to say Y to "Use ACPI tables to decode..." below
[which might imply enabling ACPI] if you want to use this driver
on non-Banias CPUs.
-
+
For details, take a look at <file:Documentation/cpu-freq/>.
-
+
If in doubt, say N.
config X86_SPEEDSTEP_CENTRINO_ACPI
@@ -148,7 +147,7 @@ config X86_SPEEDSTEP_ICH
help
This adds the CPUFreq driver for certain mobile Intel Pentium III
(Coppermine), all mobile Intel Pentium III-M (Tualatin) and all
- mobile Intel Pentium 4 P4-M on systems which have an Intel ICH2,
+ mobile Intel Pentium 4 P4-M on systems which have an Intel ICH2,
ICH3 or ICH4 southbridge.
For details, take a look at <file:Documentation/cpu-freq/>.
@@ -161,7 +160,7 @@ config X86_SPEEDSTEP_SMI
depends on EXPERIMENTAL
help
This adds the CPUFreq driver for certain mobile Intel Pentium III
- (Coppermine), all mobile Intel Pentium III-M (Tualatin)
+ (Coppermine), all mobile Intel Pentium III-M (Tualatin)
on systems which have an Intel 440BX/ZX/MX southbridge.
For details, take a look at <file:Documentation/cpu-freq/>.
@@ -203,9 +202,10 @@ config X86_LONGRUN
config X86_LONGHAUL
tristate "VIA Cyrix III Longhaul"
select CPU_FREQ_TABLE
+ depends on BROKEN
help
- This adds the CPUFreq driver for VIA Samuel/CyrixIII,
- VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
+ This adds the CPUFreq driver for VIA Samuel/CyrixIII,
+ VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
processors.
For details, take a look at <file:Documentation/cpu-freq/>.
@@ -215,11 +215,11 @@ config X86_LONGHAUL
comment "shared options"
config X86_ACPI_CPUFREQ_PROC_INTF
- bool "/proc/acpi/processor/../performance interface (deprecated)"
+ bool "/proc/acpi/processor/../performance interface (deprecated)"
depends on PROC_FS
depends on X86_ACPI_CPUFREQ || X86_SPEEDSTEP_CENTRINO_ACPI || X86_POWERNOW_K7_ACPI || X86_POWERNOW_K8_ACPI
help
- This enables the deprecated /proc/acpi/processor/../performance
+ This enables the deprecated /proc/acpi/processor/../performance
interface. While it is helpful for debugging, the generic,
cross-architecture cpufreq interfaces should be used.
@@ -233,9 +233,9 @@ config X86_SPEEDSTEP_RELAXED_CAP_CHECK
bool "Relaxed speedstep capability checks"
depends on (X86_SPEEDSTEP_SMI || X86_SPEEDSTEP_ICH)
help
- Don't perform all checks for a speedstep capable system which would
- normally be done. Some ancient or strange systems, though speedstep
- capable, don't always indicate that they are speedstep capable. This
+ Don't perform all checks for a speedstep capable system which would
+ normally be done. Some ancient or strange systems, though speedstep
+ capable, don't always indicate that they are speedstep capable. This
option lets the probing code bypass some of those checks if the
parameter "relaxed_check=1" is passed to the module.
diff --git a/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c
index 2b62dee35c6c..f275e0d4aee5 100644
--- a/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c
+++ b/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c
@@ -39,7 +39,7 @@ static struct pci_dev *nforce2_chipset_dev;
static int fid = 0;
/* min_fsb, max_fsb:
- * minimum and maximum FSB (= FSB at boot time)
+ * minimum and maximum FSB (= FSB at boot time)
*/
static int min_fsb = 0;
static int max_fsb = 0;
@@ -57,10 +57,10 @@ MODULE_PARM_DESC(min_fsb,
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "cpufreq-nforce2", msg)
-/*
+/**
* nforce2_calc_fsb - calculate FSB
* @pll: PLL value
- *
+ *
* Calculates FSB from PLL value
*/
static int nforce2_calc_fsb(int pll)
@@ -76,10 +76,10 @@ static int nforce2_calc_fsb(int pll)
return 0;
}
-/*
+/**
* nforce2_calc_pll - calculate PLL value
* @fsb: FSB
- *
+ *
* Calculate PLL value for given FSB
*/
static int nforce2_calc_pll(unsigned int fsb)
@@ -106,10 +106,10 @@ static int nforce2_calc_pll(unsigned int fsb)
return NFORCE2_PLL(mul, div);
}
-/*
+/**
* nforce2_write_pll - write PLL value to chipset
* @pll: PLL value
- *
+ *
* Writes new FSB PLL value to chipset
*/
static void nforce2_write_pll(int pll)
@@ -121,15 +121,13 @@ static void nforce2_write_pll(int pll)
pci_write_config_dword(nforce2_chipset_dev, NFORCE2_PLLADR, temp);
/* Now write the value in all 64 registers */
- for (temp = 0; temp <= 0x3f; temp++) {
- pci_write_config_dword(nforce2_chipset_dev,
- NFORCE2_PLLREG, pll);
- }
+ for (temp = 0; temp <= 0x3f; temp++)
+ pci_write_config_dword(nforce2_chipset_dev, NFORCE2_PLLREG, pll);
return;
}
-/*
+/**
* nforce2_fsb_read - Read FSB
*
* Read FSB from chipset
@@ -140,39 +138,32 @@ static unsigned int nforce2_fsb_read(int bootfsb)
struct pci_dev *nforce2_sub5;
u32 fsb, temp = 0;
-
/* Get chipset boot FSB from subdevice 5 (FSB at boot-time) */
nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
- 0x01EF,
- PCI_ANY_ID,
- PCI_ANY_ID,
- NULL);
-
+ 0x01EF,PCI_ANY_ID,PCI_ANY_ID,NULL);
if (!nforce2_sub5)
return 0;
pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb);
fsb /= 1000000;
-
+
/* Check if PLL register is already set */
- pci_read_config_byte(nforce2_chipset_dev,
- NFORCE2_PLLENABLE, (u8 *)&temp);
-
+ pci_read_config_byte(nforce2_chipset_dev,NFORCE2_PLLENABLE, (u8 *)&temp);
+
if(bootfsb || !temp)
return fsb;
/* Use PLL register FSB value */
- pci_read_config_dword(nforce2_chipset_dev,
- NFORCE2_PLLREG, &temp);
+ pci_read_config_dword(nforce2_chipset_dev,NFORCE2_PLLREG, &temp);
fsb = nforce2_calc_fsb(temp);
return fsb;
}
-/*
+/**
* nforce2_set_fsb - set new FSB
* @fsb: New FSB
- *
+ *
* Sets new FSB
*/
static int nforce2_set_fsb(unsigned int fsb)
@@ -186,7 +177,7 @@ static int nforce2_set_fsb(unsigned int fsb)
printk(KERN_ERR "cpufreq: FSB %d is out of range!\n", fsb);
return -EINVAL;
}
-
+
tfsb = nforce2_fsb_read(0);
if (!tfsb) {
printk(KERN_ERR "cpufreq: Error while reading the FSB\n");
@@ -194,8 +185,7 @@ static int nforce2_set_fsb(unsigned int fsb)
}
/* First write? Then set actual value */
- pci_read_config_byte(nforce2_chipset_dev,
- NFORCE2_PLLENABLE, (u8 *)&temp);
+ pci_read_config_byte(nforce2_chipset_dev,NFORCE2_PLLENABLE, (u8 *)&temp);
if (!temp) {
pll = nforce2_calc_pll(tfsb);
@@ -223,7 +213,7 @@ static int nforce2_set_fsb(unsigned int fsb)
/* Calculate the PLL reg. value */
if ((pll = nforce2_calc_pll(tfsb)) == -1)
return -EINVAL;
-
+
nforce2_write_pll(pll);
#ifdef NFORCE2_DELAY
mdelay(NFORCE2_DELAY);
@@ -239,7 +229,7 @@ static int nforce2_set_fsb(unsigned int fsb)
/**
* nforce2_get - get the CPU frequency
* @cpu: CPU number
- *
+ *
* Returns the CPU frequency
*/
static unsigned int nforce2_get(unsigned int cpu)
@@ -354,10 +344,10 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
printk(KERN_INFO "cpufreq: FSB currently at %i MHz, FID %d.%d\n", fsb,
fid / 10, fid % 10);
-
+
/* Set maximum FSB to FSB at boot time */
max_fsb = nforce2_fsb_read(1);
-
+
if(!max_fsb)
return -EIO;
@@ -398,17 +388,15 @@ static struct cpufreq_driver nforce2_driver = {
* nforce2_detect_chipset - detect the Southbridge which contains FSB PLL logic
*
* Detects nForce2 A2 and C1 stepping
- *
+ *
*/
static unsigned int nforce2_detect_chipset(void)
{
u8 revision;
nforce2_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
- PCI_DEVICE_ID_NVIDIA_NFORCE2,
- PCI_ANY_ID,
- PCI_ANY_ID,
- NULL);
+ PCI_DEVICE_ID_NVIDIA_NFORCE2,
+ PCI_ANY_ID, PCI_ANY_ID, NULL);
if (nforce2_chipset_dev == NULL)
return -ENODEV;
diff --git a/arch/i386/kernel/cpu/cpufreq/elanfreq.c b/arch/i386/kernel/cpu/cpufreq/elanfreq.c
index 3f7caa4ae6d6..f317276afa7a 100644
--- a/arch/i386/kernel/cpu/cpufreq/elanfreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/elanfreq.c
@@ -1,16 +1,16 @@
/*
- * elanfreq: cpufreq driver for the AMD ELAN family
+ * elanfreq: cpufreq driver for the AMD ELAN family
*
* (c) Copyright 2002 Robert Schwebel <r.schwebel@pengutronix.de>
*
- * Parts of this code are (c) Sven Geggus <sven@geggus.net>
+ * Parts of this code are (c) Sven Geggus <sven@geggus.net>
*
- * All Rights Reserved.
+ * All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
+ * 2 of the License, or (at your option) any later version.
*
* 2002-02-13: - initial revision for 2.4.18-pre9 by Robert Schwebel
*
@@ -28,7 +28,7 @@
#include <asm/timex.h>
#include <asm/io.h>
-#define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */
+#define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */
#define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */
/* Module parameter */
@@ -41,7 +41,7 @@ struct s_elan_multiplier {
};
/*
- * It is important that the frequencies
+ * It is important that the frequencies
* are listed in ascending order here!
*/
struct s_elan_multiplier elan_multiplier[] = {
@@ -72,78 +72,79 @@ static struct cpufreq_frequency_table elanfreq_table[] = {
* elanfreq_get_cpu_frequency: determine current cpu speed
*
* Finds out at which frequency the CPU of the Elan SOC runs
- * at the moment. Frequencies from 1 to 33 MHz are generated
+ * at the moment. Frequencies from 1 to 33 MHz are generated
* the normal way, 66 and 99 MHz are called "Hyperspeed Mode"
- * and have the rest of the chip running with 33 MHz.
+ * and have the rest of the chip running with 33 MHz.
*/
static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
{
- u8 clockspeed_reg; /* Clock Speed Register */
-
+ u8 clockspeed_reg; /* Clock Speed Register */
+
local_irq_disable();
- outb_p(0x80,REG_CSCIR);
- clockspeed_reg = inb_p(REG_CSCDR);
+ outb_p(0x80,REG_CSCIR);
+ clockspeed_reg = inb_p(REG_CSCDR);
local_irq_enable();
- if ((clockspeed_reg & 0xE0) == 0xE0) { return 0; }
+ if ((clockspeed_reg & 0xE0) == 0xE0)
+ return 0;
- /* Are we in CPU clock multiplied mode (66/99 MHz)? */
- if ((clockspeed_reg & 0xE0) == 0xC0) {
- if ((clockspeed_reg & 0x01) == 0) {
+ /* Are we in CPU clock multiplied mode (66/99 MHz)? */
+ if ((clockspeed_reg & 0xE0) == 0xC0) {
+ if ((clockspeed_reg & 0x01) == 0)
return 66000;
- } else {
- return 99000;
- }
- }
+ else
+ return 99000;
+ }
/* 33 MHz is not 32 MHz... */
if ((clockspeed_reg & 0xE0)==0xA0)
return 33000;
- return ((1<<((clockspeed_reg & 0xE0) >> 5)) * 1000);
+ return ((1<<((clockspeed_reg & 0xE0) >> 5)) * 1000);
}
/**
- * elanfreq_set_cpu_frequency: Change the CPU core frequency
- * @cpu: cpu number
+ * elanfreq_set_cpu_frequency: Change the CPU core frequency
+ * @cpu: cpu number
* @freq: frequency in kHz
*
- * This function takes a frequency value and changes the CPU frequency
+ * This function takes a frequency value and changes the CPU frequency
* according to this. Note that the frequency has to be checked by
* elanfreq_validatespeed() for correctness!
- *
- * There is no return value.
+ *
+ * There is no return value.
*/
-static void elanfreq_set_cpu_state (unsigned int state) {
-
+static void elanfreq_set_cpu_state (unsigned int state)
+{
struct cpufreq_freqs freqs;
freqs.old = elanfreq_get_cpu_frequency(0);
freqs.new = elan_multiplier[state].clock;
freqs.cpu = 0; /* elanfreq.c is UP only driver */
-
+
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",elan_multiplier[state].clock);
+ printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",
+ elan_multiplier[state].clock);
- /*
- * Access to the Elan's internal registers is indexed via
- * 0x22: Chip Setup & Control Register Index Register (CSCI)
- * 0x23: Chip Setup & Control Register Data Register (CSCD)
+ /*
+ * Access to the Elan's internal registers is indexed via
+ * 0x22: Chip Setup & Control Register Index Register (CSCI)
+ * 0x23: Chip Setup & Control Register Data Register (CSCD)
*
*/
- /*
- * 0x40 is the Power Management Unit's Force Mode Register.
+ /*
+ * 0x40 is the Power Management Unit's Force Mode Register.
* Bit 6 enables Hyperspeed Mode (66/100 MHz core frequency)
*/
local_irq_disable();
- outb_p(0x40,REG_CSCIR); /* Disable hyperspeed mode */
+ outb_p(0x40,REG_CSCIR); /* Disable hyperspeed mode */
outb_p(0x00,REG_CSCDR);
local_irq_enable(); /* wait till internal pipelines and */
udelay(1000); /* buffers have cleaned up */
@@ -166,10 +167,10 @@ static void elanfreq_set_cpu_state (unsigned int state) {
/**
* elanfreq_validatespeed: test if frequency range is valid
- * @policy: the policy to validate
+ * @policy: the policy to validate
*
- * This function checks if a given frequency range in kHz is valid
- * for the hardware supported by the driver.
+ * This function checks if a given frequency range in kHz is valid
+ * for the hardware supported by the driver.
*/
static int elanfreq_verify (struct cpufreq_policy *policy)
@@ -177,11 +178,11 @@ static int elanfreq_verify (struct cpufreq_policy *policy)
return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]);
}
-static int elanfreq_target (struct cpufreq_policy *policy,
- unsigned int target_freq,
+static int elanfreq_target (struct cpufreq_policy *policy,
+ unsigned int target_freq,
unsigned int relation)
{
- unsigned int newstate = 0;
+ unsigned int newstate = 0;
if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], target_freq, relation, &newstate))
return -EINVAL;
@@ -212,7 +213,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
max_freq = elanfreq_get_cpu_frequency(0);
/* table init */
- for (i=0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
+ for (i=0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
if (elanfreq_table[i].frequency > max_freq)
elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
}
@@ -226,8 +227,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
if (result)
return (result);
- cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
-
+ cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
return 0;
}
@@ -268,9 +268,9 @@ static struct freq_attr* elanfreq_attr[] = {
static struct cpufreq_driver elanfreq_driver = {
- .get = elanfreq_get_cpu_frequency,
- .verify = elanfreq_verify,
- .target = elanfreq_target,
+ .get = elanfreq_get_cpu_frequency,
+ .verify = elanfreq_verify,
+ .target = elanfreq_target,
.init = elanfreq_cpu_init,
.exit = elanfreq_cpu_exit,
.name = "elanfreq",
@@ -279,23 +279,21 @@ static struct cpufreq_driver elanfreq_driver = {
};
-static int __init elanfreq_init(void)
-{
+static int __init elanfreq_init(void)
+{
struct cpuinfo_x86 *c = cpu_data;
/* Test if we have the right hardware */
if ((c->x86_vendor != X86_VENDOR_AMD) ||
- (c->x86 != 4) || (c->x86_model!=10))
- {
+ (c->x86 != 4) || (c->x86_model!=10)) {
printk(KERN_INFO "elanfreq: error: no Elan processor found!\n");
return -ENODEV;
}
-
return cpufreq_register_driver(&elanfreq_driver);
}
-static void __exit elanfreq_exit(void)
+static void __exit elanfreq_exit(void)
{
cpufreq_unregister_driver(&elanfreq_driver);
}
@@ -309,4 +307,3 @@ MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs");
module_init(elanfreq_init);
module_exit(elanfreq_exit);
-
diff --git a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
index e86ea486c311..92afa3bc84f1 100644
--- a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
@@ -6,12 +6,12 @@
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation
+ * version 2 as published by the Free Software Foundation
*
* The author(s) of this software shall not be held liable for damages
* of any nature resulting due to the use of this software. This
* software is provided AS-IS with no warranties.
- *
+ *
* Theoritical note:
*
* (see Geode(tm) CS5530 manual (rev.4.1) page.56)
@@ -21,18 +21,18 @@
*
* Suspend Modulation works by asserting and de-asserting the SUSP# pin
* to CPU(GX1/GXLV) for configurable durations. When asserting SUSP#
- * the CPU enters an idle state. GX1 stops its core clock when SUSP# is
+ * the CPU enters an idle state. GX1 stops its core clock when SUSP# is
* asserted then power consumption is reduced.
*
- * Suspend Modulation's OFF/ON duration are configurable
+ * Suspend Modulation's OFF/ON duration are configurable
* with 'Suspend Modulation OFF Count Register'
* and 'Suspend Modulation ON Count Register'.
- * These registers are 8bit counters that represent the number of
+ * These registers are 8bit counters that represent the number of
* 32us intervals which the SUSP# pin is asserted(ON)/de-asserted(OFF)
* to the processor.
*
- * These counters define a ratio which is the effective frequency
- * of operation of the system.
+ * These counters define a ratio which is the effective frequency
+ * of operation of the system.
*
* OFF Count
* F_eff = Fgx * ----------------------
@@ -40,24 +40,24 @@
*
* 0 <= On Count, Off Count <= 255
*
- * From these limits, we can get register values
+ * From these limits, we can get register values
*
* off_duration + on_duration <= MAX_DURATION
* on_duration = off_duration * (stock_freq - freq) / freq
*
- * off_duration = (freq * DURATION) / stock_freq
- * on_duration = DURATION - off_duration
+ * off_duration = (freq * DURATION) / stock_freq
+ * on_duration = DURATION - off_duration
*
*
*---------------------------------------------------------------------------
*
* ChangeLog:
- * Dec. 12, 2003 Hiroshi Miura <miura@da-cha.org>
- * - fix on/off register mistake
- * - fix cpu_khz calc when it stops cpu modulation.
+ * Dec. 12, 2003 Hiroshi Miura <miura@da-cha.org>
+ * - fix on/off register mistake
+ * - fix cpu_khz calc when it stops cpu modulation.
*
- * Dec. 11, 2002 Hiroshi Miura <miura@da-cha.org>
- * - rewrite for Cyrix MediaGX Cx5510/5520 and
+ * Dec. 11, 2002 Hiroshi Miura <miura@da-cha.org>
+ * - rewrite for Cyrix MediaGX Cx5510/5520 and
* NatSemi Geode Cs5530(A).
*
* Jul. ??, 2002 Zwane Mwaikambo <zwane@commfireservices.com>
@@ -74,40 +74,40 @@
************************************************************************/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/pci.h>
-#include <asm/processor.h>
+#include <asm/processor.h>
#include <asm/errno.h>
/* PCI config registers, all at F0 */
-#define PCI_PMER1 0x80 /* power management enable register 1 */
-#define PCI_PMER2 0x81 /* power management enable register 2 */
-#define PCI_PMER3 0x82 /* power management enable register 3 */
-#define PCI_IRQTC 0x8c /* irq speedup timer counter register:typical 2 to 4ms */
-#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */
-#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */
-#define PCI_MODON 0x95 /* suspend modulation ON counter register */
-#define PCI_SUSCFG 0x96 /* suspend configuration register */
+#define PCI_PMER1 0x80 /* power management enable register 1 */
+#define PCI_PMER2 0x81 /* power management enable register 2 */
+#define PCI_PMER3 0x82 /* power management enable register 3 */
+#define PCI_IRQTC 0x8c /* irq speedup timer counter register:typical 2 to 4ms */
+#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */
+#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */
+#define PCI_MODON 0x95 /* suspend modulation ON counter register */
+#define PCI_SUSCFG 0x96 /* suspend configuration register */
/* PMER1 bits */
-#define GPM (1<<0) /* global power management */
-#define GIT (1<<1) /* globally enable PM device idle timers */
-#define GTR (1<<2) /* globally enable IO traps */
-#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */
-#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */
+#define GPM (1<<0) /* global power management */
+#define GIT (1<<1) /* globally enable PM device idle timers */
+#define GTR (1<<2) /* globally enable IO traps */
+#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */
+#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */
/* SUSCFG bits */
-#define SUSMOD (1<<0) /* enable/disable suspend modulation */
-/* the belows support only with cs5530 (after rev.1.2)/cs5530A */
-#define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */
- /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */
-#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */
-/* the belows support only with cs5530A */
-#define PWRSVE_ISA (1<<3) /* stop ISA clock */
-#define PWRSVE (1<<4) /* active idle */
+#define SUSMOD (1<<0) /* enable/disable suspend modulation */
+/* the belows support only with cs5530 (after rev.1.2)/cs5530A */
+#define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */
+ /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */
+#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */
+/* the belows support only with cs5530A */
+#define PWRSVE_ISA (1<<3) /* stop ISA clock */
+#define PWRSVE (1<<4) /* active idle */
struct gxfreq_params {
u8 on_duration;
@@ -128,7 +128,7 @@ module_param (pci_busclk, int, 0444);
/* maximum duration for which the cpu may be suspended
* (32us * MAX_DURATION). If no parameter is given, this defaults
- * to 255.
+ * to 255.
* Note that this leads to a maximum of 8 ms(!) where the CPU clock
* is suspended -- processing power is just 0.39% of what it used to be,
* though. 781.25 kHz(!) for a 200 MHz processor -- wow. */
@@ -144,17 +144,17 @@ module_param (max_duration, int, 0444);
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "gx-suspmod", msg)
/**
- * we can detect a core multipiler from dir0_lsb
- * from GX1 datasheet p.56,
- * MULT[3:0]:
- * 0000 = SYSCLK multiplied by 4 (test only)
- * 0001 = SYSCLK multiplied by 10
- * 0010 = SYSCLK multiplied by 4
- * 0011 = SYSCLK multiplied by 6
- * 0100 = SYSCLK multiplied by 9
- * 0101 = SYSCLK multiplied by 5
- * 0110 = SYSCLK multiplied by 7
- * 0111 = SYSCLK multiplied by 8
+ * we can detect a core multipiler from dir0_lsb
+ * from GX1 datasheet p.56,
+ * MULT[3:0]:
+ * 0000 = SYSCLK multiplied by 4 (test only)
+ * 0001 = SYSCLK multiplied by 10
+ * 0010 = SYSCLK multiplied by 4
+ * 0011 = SYSCLK multiplied by 6
+ * 0100 = SYSCLK multiplied by 9
+ * 0101 = SYSCLK multiplied by 5
+ * 0110 = SYSCLK multiplied by 7
+ * 0111 = SYSCLK multiplied by 8
* of 33.3MHz
**/
static int gx_freq_mult[16] = {
@@ -164,17 +164,17 @@ static int gx_freq_mult[16] = {
/****************************************************************
- * Low Level chipset interface *
+ * Low Level chipset interface *
****************************************************************/
static struct pci_device_id gx_chipset_tbl[] __initdata = {
- { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, PCI_ANY_ID, PCI_ANY_ID },
- { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, PCI_ANY_ID, PCI_ANY_ID },
- { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, PCI_ANY_ID, PCI_ANY_ID },
- { 0, },
+ { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, PCI_ANY_ID, PCI_ANY_ID },
+ { 0, },
};
/**
- * gx_detect_chipset:
+ * gx_detect_chipset:
*
**/
static __init struct pci_dev *gx_detect_chipset(void)
@@ -182,17 +182,16 @@ static __init struct pci_dev *gx_detect_chipset(void)
struct pci_dev *gx_pci = NULL;
/* check if CPU is a MediaGX or a Geode. */
- if ((current_cpu_data.x86_vendor != X86_VENDOR_NSC) &&
+ if ((current_cpu_data.x86_vendor != X86_VENDOR_NSC) &&
(current_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) {
dprintk("error: no MediaGX/Geode processor found!\n");
- return NULL;
+ return NULL;
}
/* detect which companion chip is used */
while ((gx_pci = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, gx_pci)) != NULL) {
- if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL) {
+ if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL)
return gx_pci;
- }
}
dprintk("error: no supported chipset found!\n");
@@ -200,24 +199,24 @@ static __init struct pci_dev *gx_detect_chipset(void)
}
/**
- * gx_get_cpuspeed:
+ * gx_get_cpuspeed:
*
* Finds out at which efficient frequency the Cyrix MediaGX/NatSemi Geode CPU runs.
*/
static unsigned int gx_get_cpuspeed(unsigned int cpu)
{
- if ((gx_params->pci_suscfg & SUSMOD) == 0)
+ if ((gx_params->pci_suscfg & SUSMOD) == 0)
return stock_freq;
- return (stock_freq * gx_params->off_duration)
+ return (stock_freq * gx_params->off_duration)
/ (gx_params->on_duration + gx_params->off_duration);
}
/**
* gx_validate_speed:
* determine current cpu speed
- *
-**/
+ *
+ **/
static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off_duration)
{
@@ -230,7 +229,7 @@ static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off
*on_duration=0;
for (i=max_duration; i>0; i--) {
- tmp_off = ((khz * i) / stock_freq) & 0xff;
+ tmp_off = ((khz * i) / stock_freq) & 0xff;
tmp_on = i - tmp_off;
tmp_freq = (stock_freq * tmp_off) / i;
/* if this relation is closer to khz, use this. If it's equal,
@@ -247,18 +246,17 @@ static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off
/**
- * gx_set_cpuspeed:
- * set cpu speed in khz.
+ * gx_set_cpuspeed:
+ * set cpu speed in khz.
**/
static void gx_set_cpuspeed(unsigned int khz)
{
- u8 suscfg, pmer1;
+ u8 suscfg, pmer1;
unsigned int new_khz;
unsigned long flags;
struct cpufreq_freqs freqs;
-
freqs.cpu = 0;
freqs.old = gx_get_cpuspeed(0);
@@ -303,18 +301,18 @@ static void gx_set_cpuspeed(unsigned int khz)
pci_write_config_byte(gx_params->cs55x0, PCI_MODOFF, gx_params->off_duration);
pci_write_config_byte(gx_params->cs55x0, PCI_MODON, gx_params->on_duration);
- pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, suscfg);
- pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg);
+ pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, suscfg);
+ pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg);
- local_irq_restore(flags);
+ local_irq_restore(flags);
gx_params->pci_suscfg = suscfg;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
- gx_params->on_duration * 32, gx_params->off_duration * 32);
- dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new);
+ dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
+ gx_params->on_duration * 32, gx_params->off_duration * 32);
+ dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new);
}
/****************************************************************
@@ -322,10 +320,10 @@ static void gx_set_cpuspeed(unsigned int khz)
****************************************************************/
/*
- * cpufreq_gx_verify: test if frequency range is valid
+ * cpufreq_gx_verify: test if frequency range is valid
*
- * This function checks if a given frequency range in kHz is valid
- * for the hardware supported by the driver.
+ * This function checks if a given frequency range in kHz is valid
+ * for the hardware supported by the driver.
*/
static int cpufreq_gx_verify(struct cpufreq_policy *policy)
@@ -333,8 +331,8 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy)
unsigned int tmp_freq = 0;
u8 tmp1, tmp2;
- if (!stock_freq || !policy)
- return -EINVAL;
+ if (!stock_freq || !policy)
+ return -EINVAL;
policy->cpu = 0;
cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq);
@@ -342,14 +340,14 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy)
/* it needs to be assured that at least one supported frequency is
* within policy->min and policy->max. If it is not, policy->max
* needs to be increased until one freuqency is supported.
- * policy->min may not be decreased, though. This way we guarantee a
+ * policy->min may not be decreased, though. This way we guarantee a
* specific processing capacity.
*/
tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2);
- if (tmp_freq < policy->min)
+ if (tmp_freq < policy->min)
tmp_freq += stock_freq / max_duration;
policy->min = tmp_freq;
- if (policy->min > policy->max)
+ if (policy->min > policy->max)
policy->max = tmp_freq;
tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2);
if (tmp_freq > policy->max)
@@ -358,12 +356,12 @@ static int cpufreq_gx_verify(struct cpufreq_policy *policy)
if (policy->max < policy->min)
policy->max = policy->min;
cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq);
-
+
return 0;
}
/*
- * cpufreq_gx_target:
+ * cpufreq_gx_target:
*
*/
static int cpufreq_gx_target(struct cpufreq_policy *policy,
@@ -373,8 +371,8 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy,
u8 tmp1, tmp2;
unsigned int tmp_freq;
- if (!stock_freq || !policy)
- return -EINVAL;
+ if (!stock_freq || !policy)
+ return -EINVAL;
policy->cpu = 0;
@@ -431,7 +429,7 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
return 0;
}
-/*
+/*
* cpufreq_gx_init:
* MediaGX/Geode GX initialize cpufreq driver
*/
@@ -452,7 +450,7 @@ static int __init cpufreq_gx_init(void)
u32 class_rev;
/* Test if we have the right hardware */
- if ((gx_pci = gx_detect_chipset()) == NULL)
+ if ((gx_pci = gx_detect_chipset()) == NULL)
return -ENODEV;
/* check whether module parameters are sane */
@@ -461,10 +459,9 @@ static int __init cpufreq_gx_init(void)
dprintk("geode suspend modulation available.\n");
- params = kmalloc(sizeof(struct gxfreq_params), GFP_KERNEL);
+ params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL);
if (params == NULL)
return -ENOMEM;
- memset(params, 0, sizeof(struct gxfreq_params));
params->cs55x0 = gx_pci;
gx_params = params;
@@ -478,7 +475,7 @@ static int __init cpufreq_gx_init(void)
pci_read_config_dword(params->cs55x0, PCI_CLASS_REVISION, &class_rev);
params->pci_rev = class_rev && 0xff;
- if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) {
+ if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) {
kfree(params);
return ret; /* register error! */
}
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.h b/arch/i386/kernel/cpu/cpufreq/longhaul.h
index 2a495c162ec7..d3a95d77ee85 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.h
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.h
@@ -234,7 +234,7 @@ static int __initdata ezrat_eblcr[32] = {
/*
* VIA C3 Nehemiah */
-
+
static int __initdata nehemiah_a_clock_ratio[32] = {
100, /* 0000 -> 10.0x */
160, /* 0001 -> 16.0x */
@@ -446,7 +446,7 @@ static int __initdata nehemiah_c_eblcr[32] = {
/* end of table */
};
-/*
+/*
* Voltage scales. Div/Mod by 1000 to get actual voltage.
* Which scale to use depends on the VRM type in use.
*/
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index cc73a7ae34bc..ab6504efd801 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -14,7 +14,7 @@
* The author(s) of this software shall not be held liable for damages
* of any nature resulting due to the use of this software. This
* software is provided AS-IS with no warranties.
- *
+ *
* Date Errata Description
* 20020525 N44, O17 12.5% or 25% DC causes lockup
*
@@ -22,7 +22,7 @@
#include <linux/config.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
@@ -30,7 +30,7 @@
#include <linux/cpumask.h>
#include <linux/sched.h> /* current / set_cpus_allowed() */
-#include <asm/processor.h>
+#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/timex.h>
@@ -79,7 +79,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
} else {
dprintk("CPU#%d setting duty cycle to %d%%\n",
cpu, ((125 * newstate) / 10));
- /* bits 63 - 5 : reserved
+ /* bits 63 - 5 : reserved
* bit 4 : enable/disable
* bits 3-1 : duty cycle
* bit 0 : reserved
@@ -132,7 +132,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
}
/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
- * Developer's Manual, Volume 3
+ * Developer's Manual, Volume 3
*/
cpus_allowed = current->cpus_allowed;
@@ -206,7 +206,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4D);
}
-
+
static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
{
@@ -234,7 +234,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
dprintk("has errata -- disabling frequencies lower than 2ghz\n");
break;
}
-
+
/* get max frequency */
stock_freq = cpufreq_p4_get_frequency(c);
if (!stock_freq)
@@ -244,13 +244,13 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
if ((i<2) && (has_N44_O17_errata[policy->cpu]))
p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
- else if (has_N60_errata[policy->cpu] && p4clockmod_table[i].frequency < 2000000)
+ else if (has_N60_errata[policy->cpu] && ((stock_freq * i)/8) < 2000000)
p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
else
p4clockmod_table[i].frequency = (stock_freq * i)/8;
}
cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
-
+
/* cpuinfo and default policy values */
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
policy->cpuinfo.transition_latency = 1000000; /* assumed */
@@ -262,7 +262,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
{
- cpufreq_frequency_table_put_attr(policy->cpu);
+ cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
@@ -298,7 +298,7 @@ static struct freq_attr* p4clockmod_attr[] = {
};
static struct cpufreq_driver p4clockmod_driver = {
- .verify = cpufreq_p4_verify,
+ .verify = cpufreq_p4_verify,
.target = cpufreq_p4_target,
.init = cpufreq_p4_cpu_init,
.exit = cpufreq_p4_cpu_exit,
@@ -310,12 +310,12 @@ static struct cpufreq_driver p4clockmod_driver = {
static int __init cpufreq_p4_init(void)
-{
+{
struct cpuinfo_x86 *c = cpu_data;
int ret;
/*
- * THERM_CONTROL is architectural for IA32 now, so
+ * THERM_CONTROL is architectural for IA32 now, so
* we can rely on the capability checks
*/
if (c->x86_vendor != X86_VENDOR_INTEL)
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k6.c b/arch/i386/kernel/cpu/cpufreq/powernow-k6.c
index 222f8cfe3c57..f89524051e4a 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k6.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k6.c
@@ -8,7 +8,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/ioport.h>
@@ -50,7 +50,7 @@ static int powernow_k6_get_cpu_multiplier(void)
{
u64 invalue = 0;
u32 msrval;
-
+
msrval = POWERNOW_IOPORT + 0x1;
wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
invalue=inl(POWERNOW_IOPORT + 0x8);
@@ -81,7 +81,7 @@ static void powernow_k6_set_state (unsigned int best_i)
freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
freqs.new = busfreq * clock_ratio[best_i].index;
freqs.cpu = 0; /* powernow-k6.c is UP only driver */
-
+
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* we now need to transform best_i to the BVC format, see AMD#23446 */
@@ -152,7 +152,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
busfreq = cpu_khz / max_multiplier;
/* table init */
- for (i=0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+ for (i=0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
if (clock_ratio[i].index > max_multiplier)
clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID;
else
@@ -182,7 +182,7 @@ static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
powernow_k6_set_state(i);
}
cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
+ return 0;
}
static unsigned int powernow_k6_get(unsigned int cpu)
@@ -196,8 +196,8 @@ static struct freq_attr* powernow_k6_attr[] = {
};
static struct cpufreq_driver powernow_k6_driver = {
- .verify = powernow_k6_verify,
- .target = powernow_k6_target,
+ .verify = powernow_k6_verify,
+ .target = powernow_k6_target,
.init = powernow_k6_cpu_init,
.exit = powernow_k6_cpu_exit,
.get = powernow_k6_get,
@@ -215,7 +215,7 @@ static struct cpufreq_driver powernow_k6_driver = {
* on success.
*/
static int __init powernow_k6_init(void)
-{
+{
struct cpuinfo_x86 *c = cpu_data;
if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) ||
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
index edcd626001da..2bf4237cb94e 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k7.c
@@ -199,8 +199,8 @@ static int get_ranges (unsigned char *pst)
powernow_table[j].index |= (vid << 8); /* upper 8 bits */
dprintk (" FID: 0x%x (%d.%dx [%dMHz]) "
- "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
- fid_codes[fid] % 10, speed/1000, vid,
+ "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
+ fid_codes[fid] % 10, speed/1000, vid,
mobile_vid_table[vid]/1000,
mobile_vid_table[vid]%1000);
}
@@ -368,8 +368,8 @@ static int powernow_acpi_init(void)
}
dprintk (" FID: 0x%x (%d.%dx [%dMHz]) "
- "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
- fid_codes[fid] % 10, speed/1000, vid,
+ "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10,
+ fid_codes[fid] % 10, speed/1000, vid,
mobile_vid_table[vid]/1000,
mobile_vid_table[vid]%1000);
@@ -460,7 +460,7 @@ static int powernow_decode_bios (int maxfid, int startvid)
(maxfid==pst->maxfid) && (startvid==pst->startvid))
{
dprintk ("PST:%d (@%p)\n", i, pst);
- dprintk (" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n",
+ dprintk (" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n",
pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid);
ret = get_ranges ((char *) pst + sizeof (struct pst_s));
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 3d5110b65cc3..e5bc06480ff9 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -45,7 +45,7 @@
#define PFX "powernow-k8: "
#define BFX PFX "BIOS error: "
-#define VERSION "version 1.60.0"
+#define VERSION "version 1.60.1"
#include "powernow-k8.h"
/* serialize freq changes */
@@ -54,7 +54,7 @@ static DECLARE_MUTEX(fidvid_sem);
static struct powernow_k8_data *powernow_data[NR_CPUS];
#ifndef CONFIG_SMP
-static cpumask_t cpu_core_map[1];
+static cpumask_t cpu_core_map[1] = { CPU_MASK_ALL };
#endif
/* Return a frequency in MHz, given an input fid */
@@ -83,11 +83,10 @@ static u32 find_millivolts_from_vid(struct powernow_k8_data *data, u32 vid)
*/
static u32 convert_fid_to_vco_fid(u32 fid)
{
- if (fid < HI_FID_TABLE_BOTTOM) {
+ if (fid < HI_FID_TABLE_BOTTOM)
return 8 + (2 * fid);
- } else {
+ else
return fid;
- }
}
/*
@@ -177,7 +176,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid)
if (i++ > 100) {
printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n");
return 1;
- }
+ }
} while (query_current_values_with_pending_wait(data));
count_off_irt(data);
@@ -474,8 +473,10 @@ static int check_supported_cpu(unsigned int cpu)
goto out;
eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+ if ((eax & CPUID_XFAM) != CPUID_XFAM_K8)
+ goto out;
+
if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
- ((eax & CPUID_XFAM) != CPUID_XFAM_K8) ||
((eax & CPUID_XMOD) > CPUID_XMOD_REV_G)) {
printk(KERN_INFO PFX "Processor cpuid %x not supported\n", eax);
goto out;
@@ -780,9 +781,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
/* verify only 1 entry from the lo frequency table */
if (fid < HI_FID_TABLE_BOTTOM) {
if (cntlofreq) {
- /* if both entries are the same, ignore this
- * one...
- */
+ /* if both entries are the same, ignore this one ... */
if ((powernow_table[i].frequency != powernow_table[cntlofreq].frequency) ||
(powernow_table[i].index != powernow_table[cntlofreq].index)) {
printk(KERN_ERR PFX "Too many lo freq table entries\n");
@@ -854,7 +853,7 @@ static int transition_frequency(struct powernow_k8_data *data, unsigned int inde
dprintk("cpu %d transition to index %u\n", smp_processor_id(), index);
/* fid are the lower 8 bits of the index we stored into
- * the cpufreq frequency table in find_psb_table, vid are
+ * the cpufreq frequency table in find_psb_table, vid are
* the upper 8 bits.
*/
@@ -909,7 +908,6 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
u32 checkvid = data->currvid;
unsigned int newstate;
int ret = -EIO;
- int i;
/* only run on specific CPU from here on */
oldmask = current->cpus_allowed;
@@ -955,12 +953,6 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
up(&fidvid_sem);
goto err_out;
}
-
- /* Update all the fid/vids of our siblings */
- for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
- powernow_data[i]->currvid = data->currvid;
- powernow_data[i]->currfid = data->currfid;
- }
up(&fidvid_sem);
pol->cur = find_khz_freq_from_fid(data->currfid);
@@ -1048,7 +1040,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
pol->cpus = cpu_core_map[pol->cpu];
- /* Take a crude guess here.
+ /* Take a crude guess here.
* That guess was in microseconds, so multiply with 1000 */
pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
+ (3 * (1 << data->irt) * 10)) * 1000;
@@ -1070,9 +1062,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
printk("cpu_init done, current fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
- for_each_cpu_mask(i, cpu_core_map[pol->cpu]) {
+ for_each_cpu_mask(i, cpu_core_map[pol->cpu])
powernow_data[i] = data;
- }
return 0;
@@ -1145,14 +1136,14 @@ static int __cpuinit powernowk8_init(void)
{
unsigned int i, supported_cpus = 0;
- for_each_cpu(i) {
+ for_each_online_cpu(i) {
if (check_supported_cpu(i))
supported_cpus++;
}
if (supported_cpus == num_online_cpus()) {
- printk(KERN_INFO PFX "Found %d AMD Athlon 64 / Opteron processors (" VERSION ")\n",
- supported_cpus);
+ printk(KERN_INFO PFX "Found %d AMD Athlon 64 / Opteron "
+ "processors (" VERSION ")\n", supported_cpus);
return cpufreq_register_driver(&cpufreq_amd64_driver);
}
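
Note on the convert_fid_to_vco_fid() hunk above: the change only drops the braces around the two return paths; the fid-to-VCO-fid mapping itself is unchanged. A minimal user-space sketch of that mapping follows (the HI_FID_TABLE_BOTTOM value here is a placeholder for illustration, not the driver's constant):

#include <stdio.h>

#define HI_FID_TABLE_BOTTOM_SKETCH 14	/* placeholder, see powernow-k8.h */

/* low-range fids are folded onto the VCO scale as 8 + 2*fid */
static unsigned int convert_fid_to_vco_fid_sketch(unsigned int fid)
{
	if (fid < HI_FID_TABLE_BOTTOM_SKETCH)
		return 8 + (2 * fid);
	return fid;
}

int main(void)
{
	unsigned int fid;

	for (fid = 0; fid < 20; fid++)
		printf("fid 0x%x -> vco fid 0x%x\n",
		       fid, convert_fid_to_vco_fid_sketch(fid));
	return 0;
}
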
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
index d0de37d58e9a..00ea899c17e1 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
@@ -63,7 +63,7 @@ struct powernow_k8_data {
#define MSR_C_LO_VID_SHIFT 8
/* Field definitions within the FID VID High Control MSR : */
-#define MSR_C_HI_STP_GNT_TO 0x000fffff
+#define MSR_C_HI_STP_GNT_TO 0x000fffff
/* Field definitions within the FID VID Low Status MSR : */
#define MSR_S_LO_CHANGE_PENDING 0x80000000 /* cleared when completed */
@@ -123,7 +123,7 @@ struct powernow_k8_data {
* Most values of interest are enocoded in a single field of the _PSS
* entries: the "control" value.
*/
-
+
#define IRT_SHIFT 30
#define RVO_SHIFT 28
#define EXT_TYPE_SHIFT 27
@@ -185,7 +185,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned
#ifndef for_each_cpu_mask
#define for_each_cpu_mask(i,mask) for (i=0;i<1;i++)
#endif
-
+
#ifdef CONFIG_SMP
static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
{
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index c173c0fa117a..b0ff9075708c 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -479,15 +479,13 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
unsigned l, h;
int ret;
int i;
- struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
/* Only Intel makes Enhanced Speedstep-capable CPUs */
if (cpu->x86_vendor != X86_VENDOR_INTEL || !cpu_has(cpu, X86_FEATURE_EST))
return -ENODEV;
- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
- }
if (centrino_cpu_init_acpi(policy)) {
if (policy->cpu != 0)
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
index 7c47005a1805..4f46cac155c4 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
@@ -9,7 +9,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
@@ -36,8 +36,8 @@ static unsigned int pentium3_get_frequency (unsigned int processor)
/* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */
struct {
unsigned int ratio; /* Frequency Multiplier (x10) */
- u8 bitmap; /* power on configuration bits
- [27, 25:22] (in MSR 0x2a) */
+ u8 bitmap; /* power on configuration bits
+ [27, 25:22] (in MSR 0x2a) */
} msr_decode_mult [] = {
{ 30, 0x01 },
{ 35, 0x05 },
@@ -58,9 +58,9 @@ static unsigned int pentium3_get_frequency (unsigned int processor)
/* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */
struct {
- unsigned int value; /* Front Side Bus speed in MHz */
- u8 bitmap; /* power on configuration bits [18: 19]
- (in MSR 0x2a) */
+ unsigned int value; /* Front Side Bus speed in MHz */
+ u8 bitmap; /* power on configuration bits [18: 19]
+ (in MSR 0x2a) */
} msr_decode_fsb [] = {
{ 66, 0x0 },
{ 100, 0x2 },
@@ -68,8 +68,8 @@ static unsigned int pentium3_get_frequency (unsigned int processor)
{ 0, 0xff}
};
- u32 msr_lo, msr_tmp;
- int i = 0, j = 0;
+ u32 msr_lo, msr_tmp;
+ int i = 0, j = 0;
/* read MSR 0x2a - we only need the low 32 bits */
rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
@@ -106,7 +106,7 @@ static unsigned int pentium3_get_frequency (unsigned int processor)
static unsigned int pentiumM_get_frequency(void)
{
- u32 msr_lo, msr_tmp;
+ u32 msr_lo, msr_tmp;
rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
dprintk("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
@@ -134,7 +134,7 @@ static unsigned int pentium4_get_frequency(void)
dprintk("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi);
- /* decode the FSB: see IA-32 Intel (C) Architecture Software
+ /* decode the FSB: see IA-32 Intel (C) Architecture Software
* Developer's Manual, Volume 3: System Prgramming Guide,
* revision #12 in Table B-1: MSRs in the Pentium 4 and
* Intel Xeon Processors, on page B-4 and B-5.
@@ -170,7 +170,7 @@ static unsigned int pentium4_get_frequency(void)
return (fsb * mult);
}
-
+
unsigned int speedstep_get_processor_frequency(unsigned int processor)
{
switch (processor) {
@@ -198,11 +198,11 @@ EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency);
unsigned int speedstep_detect_processor (void)
{
struct cpuinfo_x86 *c = cpu_data;
- u32 ebx, msr_lo, msr_hi;
+ u32 ebx, msr_lo, msr_hi;
dprintk("x86: %x, model: %x\n", c->x86, c->x86_model);
- if ((c->x86_vendor != X86_VENDOR_INTEL) ||
+ if ((c->x86_vendor != X86_VENDOR_INTEL) ||
((c->x86 != 6) && (c->x86 != 0xF)))
return 0;
@@ -218,15 +218,15 @@ unsigned int speedstep_detect_processor (void)
dprintk("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
switch (c->x86_mask) {
- case 4:
+ case 4:
/*
- * B-stepping [M-P4-M]
+ * B-stepping [M-P4-M]
* sample has ebx = 0x0f, production has 0x0e.
*/
if ((ebx == 0x0e) || (ebx == 0x0f))
return SPEEDSTEP_PROCESSOR_P4M;
break;
- case 7:
+ case 7:
/*
* C-stepping [M-P4-M]
* needs to have ebx=0x0e, else it's a celeron:
@@ -253,7 +253,7 @@ unsigned int speedstep_detect_processor (void)
* also, M-P4M HTs have ebx=0x8, too
* For now, they are distinguished by the model_id string
*/
- if ((ebx == 0x0e) || (strstr(c->x86_model_id,"Mobile Intel(R) Pentium(R) 4") != NULL))
+ if ((ebx == 0x0e) || (strstr(c->x86_model_id,"Mobile Intel(R) Pentium(R) 4") != NULL))
return SPEEDSTEP_PROCESSOR_P4M;
break;
default:
@@ -264,8 +264,7 @@ unsigned int speedstep_detect_processor (void)
switch (c->x86_model) {
case 0x0B: /* Intel PIII [Tualatin] */
- /* cpuid_ebx(1) is 0x04 for desktop PIII,
- 0x06 for mobile PIII-M */
+ /* cpuid_ebx(1) is 0x04 for desktop PIII, 0x06 for mobile PIII-M */
ebx = cpuid_ebx(0x00000001);
dprintk("ebx is %x\n", ebx);
@@ -275,9 +274,8 @@ unsigned int speedstep_detect_processor (void)
return 0;
/* So far all PIII-M processors support SpeedStep. See
- * Intel's 24540640.pdf of June 2003
+ * Intel's 24540640.pdf of June 2003
*/
-
return SPEEDSTEP_PROCESSOR_PIII_T;
case 0x08: /* Intel PIII [Coppermine] */
@@ -399,7 +397,7 @@ unsigned int speedstep_get_freqs(unsigned int processor,
}
}
- out:
+out:
local_irq_restore(flags);
return (ret);
}
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
index 6a727fd3a77e..b735429c50b4 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
@@ -14,7 +14,7 @@
#define SPEEDSTEP_PROCESSOR_PIII_C_EARLY 0x00000001 /* Coppermine core */
#define SPEEDSTEP_PROCESSOR_PIII_C 0x00000002 /* Coppermine core */
-#define SPEEDSTEP_PROCESSOR_PIII_T 0x00000003 /* Tualatin core */
+#define SPEEDSTEP_PROCESSOR_PIII_T 0x00000003 /* Tualatin core */
#define SPEEDSTEP_PROCESSOR_P4M 0x00000004 /* P4-M */
/* the following processors are not speedstep-capable and are not auto-detected
@@ -25,8 +25,8 @@
/* speedstep states -- only two of them */
-#define SPEEDSTEP_HIGH 0x00000000
-#define SPEEDSTEP_LOW 0x00000001
+#define SPEEDSTEP_HIGH 0x00000000
+#define SPEEDSTEP_LOW 0x00000001
/* detect a speedstep-capable processor */
@@ -36,13 +36,13 @@ extern unsigned int speedstep_detect_processor (void);
extern unsigned int speedstep_get_processor_frequency(unsigned int processor);
-/* detect the low and high speeds of the processor. The callback
- * set_state"'s first argument is either SPEEDSTEP_HIGH or
- * SPEEDSTEP_LOW; the second argument is zero so that no
+/* detect the low and high speeds of the processor. The callback
+ * set_state"'s first argument is either SPEEDSTEP_HIGH or
+ * SPEEDSTEP_LOW; the second argument is zero so that no
* cpufreq_notify_transition calls are initiated.
*/
extern unsigned int speedstep_get_freqs(unsigned int processor,
- unsigned int *low_speed,
- unsigned int *high_speed,
- unsigned int *transition_latency,
- void (*set_state) (unsigned int state));
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state) (unsigned int state));
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
index 28cc5d524afc..c28333d53646 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
@@ -13,8 +13,8 @@
*********************************************************************/
#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/pci.h>
@@ -28,21 +28,21 @@
*
* These parameters are got from IST-SMI BIOS call.
* If user gives it, these are used.
- *
+ *
*/
-static int smi_port = 0;
-static int smi_cmd = 0;
-static unsigned int smi_sig = 0;
+static int smi_port = 0;
+static int smi_cmd = 0;
+static unsigned int smi_sig = 0;
/* info about the processor */
-static unsigned int speedstep_processor = 0;
+static unsigned int speedstep_processor = 0;
-/*
- * There are only two frequency states for each processor. Values
+/*
+ * There are only two frequency states for each processor. Values
* are in kHz for the time being.
*/
static struct cpufreq_frequency_table speedstep_freqs[] = {
- {SPEEDSTEP_HIGH, 0},
+ {SPEEDSTEP_HIGH, 0},
{SPEEDSTEP_LOW, 0},
{0, CPUFREQ_TABLE_END},
};
@@ -75,7 +75,9 @@ static int speedstep_smi_ownership (void)
__asm__ __volatile__(
"out %%al, (%%dx)\n"
: "=D" (result)
- : "a" (command), "b" (function), "c" (0), "d" (smi_port), "D" (0), "S" (magic)
+ : "a" (command), "b" (function), "c" (0), "d" (smi_port),
+ "D" (0), "S" (magic)
+ : "memory"
);
dprintk("result is %x\n", result);
@@ -123,7 +125,7 @@ static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high)
*low = low_mhz * 1000;
return result;
-}
+}
/**
* speedstep_get_state - set the SpeedStep state
@@ -204,7 +206,7 @@ static void speedstep_set_state (unsigned int state)
* speedstep_target - set a new CPUFreq policy
* @policy: new policy
* @target_freq: new freq
- * @relation:
+ * @relation:
*
* Sets a new CPUFreq policy/freq.
*/
@@ -283,7 +285,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
state = speedstep_get_state();
speed = speedstep_freqs[state].frequency;
- dprintk("currently at %s speed setting - %i MHz\n",
+ dprintk("currently at %s speed setting - %i MHz\n",
(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) ? "low" : "high",
(speed / 1000));
@@ -296,7 +298,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
if (result)
return (result);
- cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
+ cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
return 0;
}
@@ -332,8 +334,8 @@ static struct freq_attr* speedstep_attr[] = {
static struct cpufreq_driver speedstep_driver = {
.name = "speedstep-smi",
- .verify = speedstep_verify,
- .target = speedstep_target,
+ .verify = speedstep_verify,
+ .target = speedstep_target,
.init = speedstep_cpu_init,
.exit = speedstep_cpu_exit,
.get = speedstep_get,
@@ -370,13 +372,12 @@ static int __init speedstep_init(void)
return -ENODEV;
}
- dprintk("signature:0x%.8lx, command:0x%.8lx, event:0x%.8lx, perf_level:0x%.8lx.\n",
+ dprintk("signature:0x%.8lx, command:0x%.8lx, event:0x%.8lx, perf_level:0x%.8lx.\n",
ist_info.signature, ist_info.command, ist_info.event, ist_info.perf_level);
-
- /* Error if no IST-SMI BIOS or no PARM
+ /* Error if no IST-SMI BIOS or no PARM
sig= 'ISGE' aka 'Intel Speedstep Gate E' */
- if ((ist_info.signature != 0x47534943) && (
+ if ((ist_info.signature != 0x47534943) && (
(smi_port == 0) || (smi_cmd == 0)))
return -ENODEV;
@@ -386,17 +387,15 @@ static int __init speedstep_init(void)
smi_sig = ist_info.signature;
/* setup smi_port from MODLULE_PARM or BIOS */
- if ((smi_port > 0xff) || (smi_port < 0)) {
+ if ((smi_port > 0xff) || (smi_port < 0))
return -EINVAL;
- } else if (smi_port == 0) {
+ else if (smi_port == 0)
smi_port = ist_info.command & 0xff;
- }
- if ((smi_cmd > 0xff) || (smi_cmd < 0)) {
+ if ((smi_cmd > 0xff) || (smi_cmd < 0))
return -EINVAL;
- } else if (smi_cmd == 0) {
+ else if (smi_cmd == 0)
smi_cmd = (ist_info.command >> 16) & 0xff;
- }
return cpufreq_register_driver(&speedstep_driver);
}
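
Note on the speedstep_smi_ownership() hunk above: besides re-wrapping the operand list, it adds a "memory" clobber. The out instruction traps into the BIOS SMI handler, which may read or write memory, so the clobber keeps the compiler from caching memory values in registers across the call. A compile-only x86 GNU C sketch of the constraint layout (operands mirror the hunk; not meant to be executed without the real SMI port):

static inline unsigned int smi_call_sketch(unsigned int command,
					   unsigned int function,
					   unsigned int port,
					   unsigned int magic)
{
	unsigned int result;

	__asm__ __volatile__(
		"out %%al, (%%dx)\n"
		: "=D" (result)
		: "a" (command), "b" (function), "c" (0), "d" (port),
		  "D" (0), "S" (magic)
		: "memory");	/* SMI handler may modify memory */

	return result;
}
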
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 5cfbd8011698..f94cdb7aca50 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -45,7 +45,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* Intel-defined (#2) */
- "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est",
+ "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
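
Note on the show_cpuinfo() hunk above: the table row names CPUID leaf 1 ECX feature bits in order, so filling slot 6 with "smx" sits between "vmx" (bit 5) and "est" (bit 7). As an illustrative user-space check of the same bits using GCC's cpuid.h:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	/* CPUID.1:ECX -- bit 5 VMX, bit 6 SMX, bit 7 EST */
	printf("vmx: %u  smx: %u  est: %u\n",
	       (ecx >> 5) & 1, (ecx >> 6) & 1, (ecx >> 7) & 1);
	return 0;
}
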
diff --git a/arch/i386/kernel/dmi_scan.c b/arch/i386/kernel/dmi_scan.c
index 6a93d75db431..ebc8dc116c43 100644
--- a/arch/i386/kernel/dmi_scan.c
+++ b/arch/i386/kernel/dmi_scan.c
@@ -5,6 +5,7 @@
#include <linux/dmi.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
+#include <asm/dmi.h>
static char * __init dmi_string(struct dmi_header *dm, u8 s)
{
@@ -106,7 +107,7 @@ static void __init dmi_save_devices(struct dmi_header *dm)
struct dmi_device *dev;
for (i = 0; i < count; i++) {
- char *d = ((char *) dm) + (i * 2);
+ char *d = (char *)(dm + 1) + (i * 2);
/* Skip disabled device */
if ((*d & 0x80) == 0)
@@ -299,3 +300,33 @@ struct dmi_device * dmi_find_device(int type, const char *name,
return NULL;
}
EXPORT_SYMBOL(dmi_find_device);
+
+/**
+ * dmi_get_year - Return year of a DMI date
+ * @field: data index (like dmi_get_system_info)
+ *
+ * Returns -1 when the field doesn't exist. 0 when it is broken.
+ */
+int dmi_get_year(int field)
+{
+ int year;
+ char *s = dmi_get_system_info(field);
+
+ if (!s)
+ return -1;
+ if (*s == '\0')
+ return 0;
+ s = strrchr(s, '/');
+ if (!s)
+ return 0;
+
+ s += 1;
+ year = simple_strtoul(s, NULL, 0);
+ if (year && year < 100) { /* 2-digit year */
+ year += 1900;
+ if (year < 1996) /* no dates < spec 1.0 */
+ year += 100;
+ }
+
+ return year;
+}
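
Note on the new dmi_get_year() above: it takes the text after the last '/' of a DMI date string and rebases two-digit years so that values below 96 are treated as 20xx (the DMI spec post-dates 1996). A user-space sketch of the same parsing, using strtoul() in place of the kernel's simple_strtoul():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_dmi_year(const char *s)
{
	int year;

	if (!s)
		return -1;			/* field missing */
	if (*s == '\0')
		return 0;			/* field broken */
	s = strrchr(s, '/');
	if (!s)
		return 0;

	year = (int)strtoul(s + 1, NULL, 0);
	if (year && year < 100) {		/* 2-digit year */
		year += 1900;
		if (year < 1996)		/* no dates < spec 1.0 */
			year += 100;
	}
	return year;
}

int main(void)
{
	const char *samples[] = { "03/17/2006", "03/17/06", "12/01/99", "" };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("\"%s\" -> %d\n", samples[i], parse_dmi_year(samples[i]));
	return 0;
}
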
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 5390b521aca0..55bc365b8753 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -202,8 +202,6 @@ static inline void mark_microcode_update (int cpu_num, microcode_header_t *mc_he
} else if (mc_header->rev == uci->rev) {
/* notify the caller of success on this cpu */
uci->err = MC_SUCCESS;
- printk(KERN_ERR "microcode: CPU%d already at revision"
- " 0x%x (current=0x%x)\n", cpu_num, mc_header->rev, uci->rev);
goto out;
}
@@ -369,7 +367,6 @@ static void do_update_one (void * unused)
struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
if (uci->mc == NULL) {
- printk(KERN_INFO "microcode: No new microcode data for CPU%d\n", cpu_num);
return;
}
@@ -511,7 +508,6 @@ static int __init microcode_init (void)
static void __exit microcode_exit (void)
{
misc_deregister(&microcode_dev);
- printk(KERN_INFO "IA-32 Microcode Update Driver v" MICROCODE_VERSION " unregistered\n");
}
module_init(microcode_init)
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 4c470e99a742..82371d83bfa9 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -1003,7 +1003,6 @@ void cpu_exit_clear(void)
cpu_clear(cpu, cpu_callout_map);
cpu_clear(cpu, cpu_callin_map);
- cpu_clear(cpu, cpu_present_map);
cpu_clear(cpu, smp_commenced_mask);
unmap_cpu_to_logical_apicid(cpu);
@@ -1015,31 +1014,20 @@ struct warm_boot_cpu_info {
int cpu;
};
-static void __devinit do_warm_boot_cpu(void *p)
+static void __cpuinit do_warm_boot_cpu(void *p)
{
struct warm_boot_cpu_info *info = p;
do_boot_cpu(info->apicid, info->cpu);
complete(info->complete);
}
-int __devinit smp_prepare_cpu(int cpu)
+static int __cpuinit __smp_prepare_cpu(int cpu)
{
DECLARE_COMPLETION(done);
struct warm_boot_cpu_info info;
struct work_struct task;
int apicid, ret;
- lock_cpu_hotplug();
-
- /*
- * On x86, CPU0 is never offlined. Trying to bring up an
- * already-booted CPU will hang. So check for that case.
- */
- if (cpu_online(cpu)) {
- ret = -EINVAL;
- goto exit;
- }
-
apicid = x86_cpu_to_apicid[cpu];
if (apicid == BAD_APICID) {
ret = -ENODEV;
@@ -1064,7 +1052,6 @@ int __devinit smp_prepare_cpu(int cpu)
zap_low_mappings();
ret = 0;
exit:
- unlock_cpu_hotplug();
return ret;
}
#endif
@@ -1392,6 +1379,22 @@ void __cpu_die(unsigned int cpu)
int __devinit __cpu_up(unsigned int cpu)
{
+#ifdef CONFIG_HOTPLUG_CPU
+ int ret=0;
+
+ /*
+ * We do warm boot only on cpus that had booted earlier
+ * Otherwise cold boot is all handled from smp_boot_cpus().
+ * cpu_callin_map is set during AP kickstart process. Its reset
+ * when a cpu is taken offline from cpu_exit_clear().
+ */
+ if (!cpu_isset(cpu, cpu_callin_map))
+ ret = __smp_prepare_cpu(cpu);
+
+ if (ret)
+ return -EIO;
+#endif
+
/* In case one didn't come up */
if (!cpu_isset(cpu, cpu_callin_map)) {
printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
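
Note on the __cpu_up() hunk above: with CPU hotplug enabled, a CPU whose cpu_callin_map bit is clear (never booted, or offlined via cpu_exit_clear()) is warm-booted through __smp_prepare_cpu() before the usual "did it come online" check runs. A control-flow sketch, with the bitmap and helpers as user-space stand-ins:

#include <stdio.h>
#include <stdbool.h>

static bool cpu_callin[8];		/* stand-in for cpu_callin_map */

static int smp_prepare_cpu_stub(int cpu)
{
	printf("warm-booting cpu %d\n", cpu);
	cpu_callin[cpu] = true;		/* the AP sets this when it checks in */
	return 0;
}

static int cpu_up_sketch(int cpu)
{
	if (!cpu_callin[cpu] && smp_prepare_cpu_stub(cpu))
		return -5;		/* -EIO in the real code */

	if (!cpu_callin[cpu]) {
		printf("skipping cpu%d, didn't come online\n", cpu);
		return -5;
	}
	printf("cpu %d online\n", cpu);
	return 0;
}

int main(void)
{
	return cpu_up_sketch(1);
}
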
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 3f21c6f6466d..8831303a473f 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -7,6 +7,7 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
#include <asm/page.h>
+#include <asm/cache.h>
OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
OUTPUT_ARCH(i386)
@@ -135,7 +136,7 @@ SECTIONS
__initramfs_start = .;
.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
__initramfs_end = .;
- . = ALIGN(32);
+ . = ALIGN(L1_CACHE_BYTES);
__per_cpu_start = .;
.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
__per_cpu_end = .;
diff --git a/arch/i386/mach-visws/reboot.c b/arch/i386/mach-visws/reboot.c
index 5d73e042ed0a..99332abfad42 100644
--- a/arch/i386/mach-visws/reboot.c
+++ b/arch/i386/mach-visws/reboot.c
@@ -1,7 +1,6 @@
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/delay.h>
-#include <linux/platform.h>
#include <asm/io.h>
#include "piix4.h"
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ff7ae6b664e8..10b6b9e7716b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -252,6 +252,15 @@ config NR_CPUS
than 64 will cause the use of a CPU mask array, causing a small
performance hit.
+config IA64_NR_NODES
+ int "Maximum number of NODEs (256-1024)" if (IA64_SGI_SN2 || IA64_GENERIC)
+ range 256 1024
+ depends on IA64_SGI_SN2 || IA64_GENERIC
+ default "256"
+ help
+ This option specifies the maximum number of nodes in your SSI system.
+ If in doubt, use the default.
+
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
depends on SMP && EXPERIMENTAL
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index f722e1a25948..80ea7506fa1a 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -1,6 +1,9 @@
#
# ia64/Makefile
#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
@@ -62,7 +65,7 @@ drivers-$(CONFIG_OPROFILE) += arch/ia64/oprofile/
boot := arch/ia64/hp/sim/boot
-.PHONY: boot compressed check
+PHONY += boot compressed check
all: compressed unwcheck
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 744fd2f79f61..0d29aa2066b3 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -116,6 +116,7 @@ CONFIG_IOSAPIC=y
CONFIG_FORCE_MAX_ZONEORDER=17
CONFIG_SMP=y
CONFIG_NR_CPUS=512
+CONFIG_IA64_NR_NODES=256
CONFIG_HOTPLUG_CPU=y
# CONFIG_SCHED_SMT is not set
# CONFIG_PREEMPT is not set
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index 8206752161bb..a718034d68d0 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -116,6 +116,7 @@ CONFIG_IA64_SGI_SN_XP=m
CONFIG_FORCE_MAX_ZONEORDER=17
CONFIG_SMP=y
CONFIG_NR_CPUS=1024
+CONFIG_IA64_NR_NODES=256
# CONFIG_HOTPLUG_CPU is not set
CONFIG_SCHED_SMT=y
CONFIG_PREEMPT=y
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 3e767288a745..6cba55da572a 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -116,6 +116,7 @@ CONFIG_IOSAPIC=y
CONFIG_FORCE_MAX_ZONEORDER=17
CONFIG_SMP=y
CONFIG_NR_CPUS=512
+CONFIG_IA64_NR_NODES=256
CONFIG_HOTPLUG_CPU=y
# CONFIG_SCHED_SMT is not set
# CONFIG_PREEMPT is not set
diff --git a/arch/ia64/dig/setup.c b/arch/ia64/dig/setup.c
index c9104bfff667..38aa9c108857 100644
--- a/arch/ia64/dig/setup.c
+++ b/arch/ia64/dig/setup.c
@@ -69,8 +69,3 @@ dig_setup (char **cmdline_p)
screen_info.orig_video_isVGA = 1; /* XXX fake */
screen_info.orig_video_ega_bx = 3; /* XXX fake */
}
-
-void __init
-dig_irq_init (void)
-{
-}
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 70dba1f0e2ee..13e739e4c84d 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -1166,19 +1166,7 @@ put_tv32 (struct compat_timeval __user *o, struct timeval *i)
asmlinkage unsigned long
sys32_alarm (unsigned int seconds)
{
- struct itimerval it_new, it_old;
- unsigned int oldalarm;
-
- it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
- it_new.it_value.tv_sec = seconds;
- it_new.it_value.tv_usec = 0;
- do_setitimer(ITIMER_REAL, &it_new, &it_old);
- oldalarm = it_old.it_value.tv_sec;
- /* ehhh.. We can't return 0 if we have an alarm pending.. */
- /* And we'd better return too much than too little anyway */
- if (it_old.it_value.tv_usec)
- oldalarm++;
- return oldalarm;
+ return alarm_setitimer(seconds);
}
/* Translations due to time_t size differences. Which affects all
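
Note on the sys32_alarm() hunk above: the open-coded itimer sequence is replaced by a call to the common alarm_setitimer() helper, which (judging from the removed body) arms a one-shot ITIMER_REAL and returns the previous alarm's remaining whole seconds, rounded up when microseconds were still pending. A user-space sketch of those semantics:

#include <stdio.h>
#include <sys/time.h>

static unsigned int alarm_sketch(unsigned int seconds)
{
	struct itimerval it_new = { { 0, 0 }, { seconds, 0 } };
	struct itimerval it_old;
	unsigned int oldalarm;

	setitimer(ITIMER_REAL, &it_new, &it_old);
	oldalarm = it_old.it_value.tv_sec;
	if (it_old.it_value.tv_usec)	/* never report 0 for a pending alarm */
		oldalarm++;
	return oldalarm;
}

int main(void)
{
	alarm_sketch(5);
	printf("previous alarm had %u second(s) left\n", alarm_sketch(2));
	return 0;
}
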
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 4722ec51c70c..a4e218ce2edb 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -420,6 +420,26 @@ int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
int __initdata nid_to_pxm_map[MAX_NUMNODES];
static struct acpi_table_slit __initdata *slit_table;
+static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa)
+{
+ int pxm;
+
+ pxm = pa->proximity_domain;
+ if (ia64_platform_is("sn2"))
+ pxm += pa->reserved[0] << 8;
+ return pxm;
+}
+
+static int get_memory_proximity_domain(struct acpi_table_memory_affinity *ma)
+{
+ int pxm;
+
+ pxm = ma->proximity_domain;
+ if (ia64_platform_is("sn2"))
+ pxm += ma->reserved1[0] << 8;
+ return pxm;
+}
+
/*
* ACPI 2.0 SLIT (System Locality Information Table)
* http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
@@ -443,13 +463,20 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
void __init
acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
{
+ int pxm;
+
+ if (!pa->flags.enabled)
+ return;
+
+ pxm = get_processor_proximity_domain(pa);
+
/* record this node in proximity bitmap */
- pxm_bit_set(pa->proximity_domain);
+ pxm_bit_set(pxm);
node_cpuid[srat_num_cpus].phys_id =
(pa->apic_id << 8) | (pa->lsapic_eid);
/* nid should be overridden as logical node id later */
- node_cpuid[srat_num_cpus].nid = pa->proximity_domain;
+ node_cpuid[srat_num_cpus].nid = pxm;
srat_num_cpus++;
}
@@ -457,10 +484,10 @@ void __init
acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
{
unsigned long paddr, size;
- u8 pxm;
+ int pxm;
struct node_memblk_s *p, *q, *pend;
- pxm = ma->proximity_domain;
+ pxm = get_memory_proximity_domain(ma);
/* fill node memory chunk structure */
paddr = ma->base_addr_hi;
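
Note on the SRAT hunks above: the proximity domain is widened from 8 to 16 bits, with sn2 platforms supplying the upper byte from the first reserved byte of the affinity entry. A sketch of that decode (struct layout here is a stand-in, not the ACPI table definition):

#include <stdio.h>

struct srat_cpu_entry_sketch {
	unsigned char proximity_domain;	/* low 8 bits of the PXM */
	unsigned char reserved[3];	/* reserved[0] carries the high 8 bits on sn2 */
};

static int decode_pxm(const struct srat_cpu_entry_sketch *pa, int is_sn2)
{
	int pxm = pa->proximity_domain;

	if (is_sn2)
		pxm += pa->reserved[0] << 8;
	return pxm;
}

int main(void)
{
	struct srat_cpu_entry_sketch pa = { .proximity_domain = 0x12,
					    .reserved = { 0x03, 0, 0 } };

	printf("pxm = %d (0x%x)\n", decode_pxm(&pa, 1), decode_pxm(&pa, 1));
	return 0;
}
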
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index dcd906fe5749..829a43cab797 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -865,6 +865,7 @@ ENTRY(interrupt)
;;
SAVE_REST
;;
+ MCA_RECOVER_RANGE(interrupt)
alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
mov out0=cr.ivr // pass cr.ivr as first arg
add out1=16,sp // pass pointer to pt_regs as second arg
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index c3a04ee7f4f6..4b0b71d5aef4 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -14,7 +14,15 @@
struct ia64_machine_vector ia64_mv;
EXPORT_SYMBOL(ia64_mv);
-static struct ia64_machine_vector *
+static __initdata const char *mvec_name;
+static __init int setup_mvec(char *s)
+{
+ mvec_name = s;
+ return 0;
+}
+early_param("machvec", setup_mvec);
+
+static struct ia64_machine_vector * __init
lookup_machvec (const char *name)
{
extern struct ia64_machine_vector machvec_start[];
@@ -33,10 +41,13 @@ machvec_init (const char *name)
{
struct ia64_machine_vector *mv;
+ if (!name)
+ name = mvec_name ? mvec_name : acpi_get_sysname();
mv = lookup_machvec(name);
- if (!mv) {
- panic("generic kernel failed to find machine vector for platform %s!", name);
- }
+ if (!mv)
+ panic("generic kernel failed to find machine vector for"
+ " platform %s!", name);
+
ia64_mv = *mv;
printk(KERN_INFO "booting generic kernel on platform %s\n", name);
}
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index b57e723f194c..87ff7fe33cfb 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -83,6 +83,7 @@
#include <asm/irq.h>
#include <asm/hw_irq.h>
+#include "mca_drv.h"
#include "entry.h"
#if defined(IA64_MCA_DEBUG_INFO)
@@ -133,7 +134,7 @@ static int cpe_poll_enabled = 1;
extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
-static int mca_init;
+static int mca_init __initdata;
static void inline
@@ -184,7 +185,7 @@ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
* Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
* Outputs : None
*/
-static void
+static void __init
ia64_log_init(int sal_info_type)
{
u64 max_size = 0;
@@ -281,6 +282,50 @@ ia64_mca_log_sal_error_record(int sal_info_type)
ia64_sal_clear_state_info(sal_info_type);
}
+/*
+ * search_mca_table
+ * See if the MCA surfaced in an instruction range
+ * that has been tagged as recoverable.
+ *
+ * Inputs
+ * first First address range to check
+ * last Last address range to check
+ * ip Instruction pointer, address we are looking for
+ *
+ * Return value:
+ * 1 on Success (in the table)/ 0 on Failure (not in the table)
+ */
+int
+search_mca_table (const struct mca_table_entry *first,
+ const struct mca_table_entry *last,
+ unsigned long ip)
+{
+ const struct mca_table_entry *curr;
+ u64 curr_start, curr_end;
+
+ curr = first;
+ while (curr <= last) {
+ curr_start = (u64) &curr->start_addr + curr->start_addr;
+ curr_end = (u64) &curr->end_addr + curr->end_addr;
+
+ if ((ip >= curr_start) && (ip <= curr_end)) {
+ return 1;
+ }
+ curr++;
+ }
+ return 0;
+}
+
+/* Given an address, look for it in the mca tables. */
+int mca_recover_range(unsigned long addr)
+{
+ extern struct mca_table_entry __start___mca_table[];
+ extern struct mca_table_entry __stop___mca_table[];
+
+ return search_mca_table(__start___mca_table, __stop___mca_table-1, addr);
+}
+EXPORT_SYMBOL_GPL(mca_recover_range);
+
#ifdef CONFIG_ACPI
int cpe_vector = -1;
@@ -355,7 +400,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
* Outputs
* None
*/
-static void
+static void __init
ia64_mca_register_cpev (int cpev)
{
/* Register the CPE interrupt vector with SAL */
@@ -386,7 +431,7 @@ ia64_mca_register_cpev (int cpev)
* Outputs
* None
*/
-void
+void __cpuinit
ia64_mca_cmc_vector_setup (void)
{
cmcv_reg_t cmcv;
@@ -747,31 +792,34 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
ia64_mca_modify_comm(previous_current);
goto no_mod;
}
- if (r13 != sos->prev_IA64_KR_CURRENT) {
- msg = "inconsistent previous current and r13";
- goto no_mod;
- }
- if ((r12 - r13) >= KERNEL_STACK_SIZE) {
- msg = "inconsistent r12 and r13";
- goto no_mod;
- }
- if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
- msg = "inconsistent ar.bspstore and r13";
- goto no_mod;
- }
- va.p = old_bspstore;
- if (va.f.reg < 5) {
- msg = "old_bspstore is in the wrong region";
- goto no_mod;
- }
- if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
- msg = "inconsistent ar.bsp and r13";
- goto no_mod;
- }
- size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
- if (ar_bspstore + size > r12) {
- msg = "no room for blocked state";
- goto no_mod;
+
+ if (!mca_recover_range(ms->pmsa_iip)) {
+ if (r13 != sos->prev_IA64_KR_CURRENT) {
+ msg = "inconsistent previous current and r13";
+ goto no_mod;
+ }
+ if ((r12 - r13) >= KERNEL_STACK_SIZE) {
+ msg = "inconsistent r12 and r13";
+ goto no_mod;
+ }
+ if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
+ msg = "inconsistent ar.bspstore and r13";
+ goto no_mod;
+ }
+ va.p = old_bspstore;
+ if (va.f.reg < 5) {
+ msg = "old_bspstore is in the wrong region";
+ goto no_mod;
+ }
+ if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
+ msg = "inconsistent ar.bsp and r13";
+ goto no_mod;
+ }
+ size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
+ if (ar_bspstore + size > r12) {
+ msg = "no room for blocked state";
+ goto no_mod;
+ }
}
ia64_mca_modify_comm(previous_current);
@@ -1443,7 +1491,7 @@ static struct irqaction mca_cpep_irqaction = {
* format most of the fields.
*/
-static void
+static void __cpuinit
format_mca_init_stack(void *mca_data, unsigned long offset,
const char *type, int cpu)
{
@@ -1467,7 +1515,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
/* Do per-CPU MCA-related initialization. */
-void __devinit
+void __cpuinit
ia64_mca_cpu_init(void *cpu_data)
{
void *pal_vaddr;
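
Note on the new search_mca_table() above: the linker-collected __mca_table entries store their bounds as 32-bit deltas relative to the field's own address, so each absolute bound is recovered as (address of the field) + (stored delta). A user-space sketch of that self-relative encoding:

#include <stdio.h>
#include <stdint.h>

struct mca_entry_sketch {
	int32_t start_addr;	/* delta from &start_addr to the range start */
	int32_t end_addr;	/* delta from &end_addr to the range end */
};

static char code_blob[64];			/* stand-in "recoverable range" */
static struct mca_entry_sketch entry;

static int in_range(const struct mca_entry_sketch *e, uintptr_t ip)
{
	uintptr_t start = (uintptr_t)&e->start_addr + e->start_addr;
	uintptr_t end = (uintptr_t)&e->end_addr + e->end_addr;

	return ip >= start && ip <= end;
}

int main(void)
{
	entry.start_addr = (int32_t)((uintptr_t)&code_blob[0] - (uintptr_t)&entry.start_addr);
	entry.end_addr = (int32_t)((uintptr_t)&code_blob[63] - (uintptr_t)&entry.end_addr);

	printf("inside: %d, outside: %d\n",
	       in_range(&entry, (uintptr_t)&code_blob[10]),
	       in_range(&entry, (uintptr_t)&code_blob[10] + 1024));
	return 0;
}
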
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index e883d85906db..37c88eb55873 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -6,6 +6,7 @@
* Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
* Copyright (C) 2005 Silicon Graphics, Inc
* Copyright (C) 2005 Keith Owens <kaos@sgi.com>
+ * Copyright (C) 2006 Russ Anderson <rja@sgi.com>
*/
#include <linux/config.h>
#include <linux/types.h>
@@ -121,11 +122,12 @@ mca_page_isolate(unsigned long paddr)
*/
void
-mca_handler_bh(unsigned long paddr)
+mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
{
- printk(KERN_ERR
- "OS_MCA: process [pid: %d](%s) encounters MCA (paddr=%lx)\n",
- current->pid, current->comm, paddr);
+ printk(KERN_ERR "OS_MCA: process [cpu %d, pid: %d, uid: %d, "
+ "iip: %p, psr: 0x%lx,paddr: 0x%lx](%s) encounters MCA.\n",
+ raw_smp_processor_id(), current->pid, current->uid,
+ iip, ipsr, paddr, current->comm);
spin_lock(&mca_bh_lock);
switch (mca_page_isolate(paddr)) {
@@ -442,21 +444,26 @@ recover_from_read_error(slidx_table_t *slidx,
if (!peidx_bottom(peidx) || !(peidx_bottom(peidx)->valid.minstate))
return 0;
psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr);
+ psr2 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_xpsr);
/*
* Check the privilege level of interrupted context.
* If it is user-mode, then terminate affected process.
*/
- if (psr1->cpl != 0) {
+
+ pmsa = sos->pal_min_state;
+ if (psr1->cpl != 0 ||
+ ((psr2->cpl != 0) && mca_recover_range(pmsa->pmsa_iip))) {
smei = peidx_bus_check(peidx, 0);
if (smei->valid.target_identifier) {
/*
* setup for resume to bottom half of MCA,
* "mca_handler_bhhook"
*/
- pmsa = sos->pal_min_state;
- /* pass to bhhook as 1st argument (gr8) */
+ /* pass to bhhook as argument (gr8, ...) */
pmsa->pmsa_gr[8-1] = smei->target_identifier;
+ pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
+ pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
/* set interrupted return address (but no use) */
pmsa->pmsa_br0 = pmsa->pmsa_iip;
/* change resume address to bottom half */
@@ -466,6 +473,7 @@ recover_from_read_error(slidx_table_t *slidx,
psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
psr2->cpl = 0;
psr2->ri = 0;
+ psr2->bn = 1;
psr2->i = 0;
return 1;
diff --git a/arch/ia64/kernel/mca_drv.h b/arch/ia64/kernel/mca_drv.h
index e2f6fa1e0ef6..31a2e52bb16f 100644
--- a/arch/ia64/kernel/mca_drv.h
+++ b/arch/ia64/kernel/mca_drv.h
@@ -111,3 +111,10 @@ typedef struct slidx_table {
slidx_foreach_entry(__pos, &((slidx)->sec)) { __count++; }\
__count; })
+struct mca_table_entry {
+ int start_addr; /* location-relative starting address of MCA recoverable range */
+ int end_addr; /* location-relative ending address of MCA recoverable range */
+};
+
+extern const struct mca_table_entry *search_mca_tables (unsigned long addr);
+extern int mca_recover_range(unsigned long);
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S
index 3f298ee4d00c..e6a580d354b9 100644
--- a/arch/ia64/kernel/mca_drv_asm.S
+++ b/arch/ia64/kernel/mca_drv_asm.S
@@ -14,15 +14,12 @@
GLOBAL_ENTRY(mca_handler_bhhook)
invala // clear RSE ?
- ;;
cover
;;
clrrrb
;;
- alloc r16=ar.pfs,0,2,1,0 // make a new frame
- ;;
+ alloc r16=ar.pfs,0,2,3,0 // make a new frame
mov ar.rsc=0
- ;;
mov r13=IA64_KR(CURRENT) // current task pointer
;;
mov r2=r13
@@ -30,7 +27,6 @@ GLOBAL_ENTRY(mca_handler_bhhook)
addl r22=IA64_RBS_OFFSET,r2
;;
mov ar.bspstore=r22
- ;;
addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
;;
adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
@@ -40,12 +36,12 @@ GLOBAL_ENTRY(mca_handler_bhhook)
movl loc1=mca_handler_bh // recovery C function
;;
mov out0=r8 // poisoned address
+ mov out1=r9 // iip
+ mov out2=r10 // psr
mov b6=loc1
;;
mov loc1=rp
- ;;
- ssm psr.i
- ;;
+ ssm psr.i | psr.ic
br.call.sptk.many rp=b6 // does not return ...
;;
mov ar.pfs=loc0
@@ -53,5 +49,4 @@ GLOBAL_ENTRY(mca_handler_bhhook)
;;
mov r8=r0
br.ret.sptk.many rp
- ;;
END(mca_handler_bhhook)
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index a68ce6678092..0766493d4d00 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -25,7 +25,7 @@
#include <asm/processor.h>
#include <asm/smp.h>
-u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
+u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_to_node_map);
cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c
index 6a4ac7d70b35..bc11bb096f58 100644
--- a/arch/ia64/kernel/patch.c
+++ b/arch/ia64/kernel/patch.c
@@ -115,7 +115,7 @@ ia64_patch_vtop (unsigned long start, unsigned long end)
ia64_srlz_i();
}
-void
+void __init
ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
{
static int first_time = 1;
@@ -149,7 +149,7 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
ia64_srlz_i();
}
-static void
+static void __init
patch_fsyscall_table (unsigned long start, unsigned long end)
{
extern unsigned long fsyscall_table[NR_syscalls];
@@ -166,7 +166,7 @@ patch_fsyscall_table (unsigned long start, unsigned long end)
ia64_srlz_i();
}
-static void
+static void __init
patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
{
extern char fsys_bubble_down[];
@@ -184,7 +184,7 @@ patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
ia64_srlz_i();
}
-void
+void __init
ia64_patch_gate (void)
{
# define START(name) ((unsigned long) __start_gate_##name##_patchlist)
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 3258e09278d0..eb388e271b2b 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -41,7 +41,6 @@
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
-#include <linux/platform.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
@@ -131,8 +130,8 @@ EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
/*
* We use a special marker for the end of memory and it uses the extra (+1) slot
*/
-struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
-int num_rsvd_regions;
+struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
+int num_rsvd_regions __initdata;
/*
@@ -141,7 +140,7 @@ int num_rsvd_regions;
* caller-specified function is called with the memory ranges that remain after filtering.
* This routine does not assume the incoming segments are sorted.
*/
-int
+int __init
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
unsigned long range_start, range_end, prev_start;
@@ -177,7 +176,7 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
return 0;
}
-static void
+static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
int j;
@@ -218,7 +217,7 @@ __initcall(register_memory);
* initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined,
* see include/asm-ia64/meminit.h if you need to define more.
*/
-void
+void __init
reserve_memory (void)
{
int n = 0;
@@ -270,7 +269,7 @@ reserve_memory (void)
* Grab the initrd start and end from the boot parameter struct given us by
* the boot loader.
*/
-void
+void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
@@ -362,7 +361,7 @@ mark_bsp_online (void)
}
#ifdef CONFIG_SMP
-static void
+static void __init
check_for_logical_procs (void)
{
pal_logical_to_physical_t info;
@@ -389,6 +388,14 @@ check_for_logical_procs (void)
}
#endif
+static __initdata int nomca;
+static __init int setup_nomca(char *s)
+{
+ nomca = 1;
+ return 0;
+}
+early_param("nomca", setup_nomca);
+
void __init
setup_arch (char **cmdline_p)
{
@@ -402,35 +409,15 @@ setup_arch (char **cmdline_p)
efi_init();
io_port_init();
+ parse_early_param();
+
#ifdef CONFIG_IA64_GENERIC
- {
- const char *mvec_name = strstr (*cmdline_p, "machvec=");
- char str[64];
-
- if (mvec_name) {
- const char *end;
- size_t len;
-
- mvec_name += 8;
- end = strchr (mvec_name, ' ');
- if (end)
- len = end - mvec_name;
- else
- len = strlen (mvec_name);
- len = min(len, sizeof (str) - 1);
- strncpy (str, mvec_name, len);
- str[len] = '\0';
- mvec_name = str;
- } else
- mvec_name = acpi_get_sysname();
- machvec_init(mvec_name);
- }
+ machvec_init(NULL);
#endif
if (early_console_setup(*cmdline_p) == 0)
mark_bsp_online();
- parse_early_param();
#ifdef CONFIG_ACPI
/* Initialize the ACPI boot-time table parser */
acpi_table_init();
@@ -493,7 +480,7 @@ setup_arch (char **cmdline_p)
#endif
/* enable IA-64 Machine Check Abort Handling unless disabled */
- if (!strstr(saved_command_line, "nomca"))
+ if (!nomca)
ia64_mca_init();
platform_setup(cmdline_p);
@@ -623,7 +610,7 @@ struct seq_operations cpuinfo_op = {
.show = show_cpuinfo
};
-void
+static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
{
union {
@@ -700,7 +687,7 @@ setup_per_cpu_areas (void)
* In addition, the minimum of the i-cache stride sizes is calculated for
* "flush_icache_range()".
*/
-static void
+static void __cpuinit
get_max_cacheline_size (void)
{
unsigned long line_size, max = 1;
@@ -763,10 +750,10 @@ get_max_cacheline_size (void)
* cpu_init() initializes state that is per-CPU. This function acts
* as a 'CPU state barrier', nothing should get across.
*/
-void
+void __cpuinit
cpu_init (void)
{
- extern void __devinit ia64_mmu_init (void *);
+ extern void __cpuinit ia64_mmu_init (void *);
unsigned long num_phys_stacked;
pal_vm_info_2_u_t vmi;
unsigned int max_ctx;
@@ -894,7 +881,7 @@ void sched_cacheflush(void)
ia64_sal_cache_flush(3);
}
-void
+void __init
check_bugs (void)
{
ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index c4b633b36dab..44e9547878ac 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -624,32 +624,8 @@ void __devinit smp_prepare_boot_cpu(void)
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}
-/*
- * mt_info[] is a temporary store for all info returned by
- * PAL_LOGICAL_TO_PHYSICAL, to be copied into cpuinfo_ia64 when the
- * specific cpu comes.
- */
-static struct {
- __u32 socket_id;
- __u16 core_id;
- __u16 thread_id;
- __u16 proc_fixed_addr;
- __u8 valid;
-} mt_info[NR_CPUS] __devinitdata;
-
#ifdef CONFIG_HOTPLUG_CPU
static inline void
-remove_from_mtinfo(int cpu)
-{
- int i;
-
- for_each_cpu(i)
- if (mt_info[i].valid && mt_info[i].socket_id ==
- cpu_data(cpu)->socket_id)
- mt_info[i].valid = 0;
-}
-
-static inline void
clear_cpu_sibling_map(int cpu)
{
int i;
@@ -678,12 +654,6 @@ remove_siblinginfo(int cpu)
/* remove it from all sibling map's */
clear_cpu_sibling_map(cpu);
-
- /* if this cpu is the last in the core group, remove all its info
- * from mt_info structure
- */
- if (last)
- remove_from_mtinfo(cpu);
}
extern void fixup_irqs(void);
@@ -878,40 +848,6 @@ init_smp_config(void)
ia64_sal_strerror(sal_ret));
}
-static inline int __devinit
-check_for_mtinfo_index(void)
-{
- int i;
-
- for_each_cpu(i)
- if (!mt_info[i].valid)
- return i;
-
- return -1;
-}
-
-/*
- * Search the mt_info to find out if this socket's cid/tid information is
- * cached or not. If the socket exists, fill in the core_id and thread_id
- * in cpuinfo
- */
-static int __devinit
-check_for_new_socket(__u16 logical_address, struct cpuinfo_ia64 *c)
-{
- int i;
- __u32 sid = c->socket_id;
-
- for_each_cpu(i) {
- if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address
- && mt_info[i].socket_id == sid) {
- c->core_id = mt_info[i].core_id;
- c->thread_id = mt_info[i].thread_id;
- return 1; /* not a new socket */
- }
- }
- return 0;
-}
-
/*
* identify_siblings(cpu) gets called from identify_cpu. This populates the
* information related to logical execution units in per_cpu_data structure.
@@ -921,14 +857,12 @@ identify_siblings(struct cpuinfo_ia64 *c)
{
s64 status;
u16 pltid;
- u64 proc_fixed_addr;
- int count, i;
pal_logical_to_physical_t info;
if (smp_num_cpucores == 1 && smp_num_siblings == 1)
return;
- if ((status = ia64_pal_logical_to_phys(0, &info)) != PAL_STATUS_SUCCESS) {
+ if ((status = ia64_pal_logical_to_phys(-1, &info)) != PAL_STATUS_SUCCESS) {
printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
status);
return;
@@ -937,47 +871,12 @@ identify_siblings(struct cpuinfo_ia64 *c)
printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status);
return;
}
- if ((status = ia64_pal_fixed_addr(&proc_fixed_addr)) != PAL_STATUS_SUCCESS) {
- printk(KERN_ERR "ia64_pal_fixed_addr failed with %ld\n", status);
- return;
- }
c->socket_id = (pltid << 8) | info.overview_ppid;
c->cores_per_socket = info.overview_cpp;
c->threads_per_core = info.overview_tpc;
- count = c->num_log = info.overview_num_log;
+ c->num_log = info.overview_num_log;
- /* If the thread and core id information is already cached, then
- * we will simply update cpu_info and return. Otherwise, we will
- * do the PAL calls and cache core and thread id's of all the siblings.
- */
- if (check_for_new_socket(proc_fixed_addr, c))
- return;
-
- for (i = 0; i < count; i++) {
- int index;
-
- if (i && (status = ia64_pal_logical_to_phys(i, &info))
- != PAL_STATUS_SUCCESS) {
- printk(KERN_ERR "ia64_pal_logical_to_phys failed"
- " with %ld\n", status);
- return;
- }
- if (info.log2_la == proc_fixed_addr) {
- c->core_id = info.log1_cid;
- c->thread_id = info.log1_tid;
- }
-
- index = check_for_mtinfo_index();
- /* We will not do the mt_info caching optimization in this case.
- */
- if (index < 0)
- continue;
-
- mt_info[index].valid = 1;
- mt_info[index].socket_id = c->socket_id;
- mt_info[index].core_id = info.log1_cid;
- mt_info[index].thread_id = info.log1_tid;
- mt_info[index].proc_fixed_addr = info.log2_la;
- }
+ c->core_id = info.log1_cid;
+ c->thread_id = info.log1_tid;
}
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 73af6267d2ef..0b9e56dd7f05 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -70,34 +70,9 @@ SECTIONS
__stop___ex_table = .;
}
- .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
- {
- __start___vtop_patchlist = .;
- *(.data.patch.vtop)
- __end___vtop_patchlist = .;
- }
-
- .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
- {
- __start___mckinley_e9_bundles = .;
- *(.data.patch.mckinley_e9)
- __end___mckinley_e9_bundles = .;
- }
-
/* Global data */
_data = .;
-#if defined(CONFIG_IA64_GENERIC)
- /* Machine Vector */
- . = ALIGN(16);
- .machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
- {
- machvec_start = .;
- *(.machvec)
- machvec_end = .;
- }
-#endif
-
/* Unwind info & table: */
. = ALIGN(8);
.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET)
@@ -154,6 +129,41 @@ SECTIONS
*(.initcall7.init)
__initcall_end = .;
}
+
+ /* MCA table */
+ . = ALIGN(16);
+ __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET)
+ {
+ __start___mca_table = .;
+ *(__mca_table)
+ __stop___mca_table = .;
+ }
+
+ .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
+ {
+ __start___vtop_patchlist = .;
+ *(.data.patch.vtop)
+ __end___vtop_patchlist = .;
+ }
+
+ .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
+ {
+ __start___mckinley_e9_bundles = .;
+ *(.data.patch.mckinley_e9)
+ __end___mckinley_e9_bundles = .;
+ }
+
+#if defined(CONFIG_IA64_GENERIC)
+ /* Machine Vector */
+ . = ALIGN(16);
+ .machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
+ {
+ machvec_start = .;
+ *(.machvec)
+ machvec_end = .;
+ }
+#endif
+
__con_initcall_start = .;
.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
{ *(.con_initcall.init) }
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 9855ba318094..84fd1c14c8a9 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -97,7 +97,7 @@ find_max_pfn (unsigned long start, unsigned long end, void *arg)
* Find a place to put the bootmap and return its starting address in
* bootmap_start. This address must be page-aligned.
*/
-int
+static int __init
find_bootmap_location (unsigned long start, unsigned long end, void *arg)
{
unsigned long needed = *(unsigned long *)arg;
@@ -141,7 +141,7 @@ find_bootmap_location (unsigned long start, unsigned long end, void *arg)
* Walk the EFI memory map and find usable memory for the system, taking
* into account reserved areas.
*/
-void
+void __init
find_memory (void)
{
unsigned long bootmap_size;
@@ -176,7 +176,7 @@ find_memory (void)
*
* Allocate and setup per-cpu data areas.
*/
-void *
+void * __cpuinit
per_cpu_init (void)
{
void *cpu_data;
@@ -228,7 +228,7 @@ count_dma_pages (u64 start, u64 end, void *arg)
* Set up the page tables.
*/
-void
+void __init
paging_init (void)
{
unsigned long max_dma;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 573d5cc63e2b..2f5e44862e91 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -525,7 +525,7 @@ void __init find_memory(void)
* find_pernode_space() does most of this already, we just need to set
* local_per_cpu_offset
*/
-void *per_cpu_init(void)
+void __cpuinit *per_cpu_init(void)
{
int cpu;
static int first_time = 1;
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 9dbc7dadd165..8d506710fdbd 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -113,8 +113,7 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
unsigned long floor, unsigned long ceiling)
{
/*
- * This is called only when is_hugepage_only_range(addr,),
- * and it follows that is_hugepage_only_range(end,) also.
+ * This is called to free hugetlb page tables.
*
* The offset of these addresses from the base of the hugetlb
* region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
@@ -126,9 +125,9 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
addr = htlbpage_to_page(addr);
end = htlbpage_to_page(end);
- if (is_hugepage_only_range(tlb->mm, floor, HPAGE_SIZE))
+ if (REGION_NUMBER(floor) == RGN_HPAGE)
floor = htlbpage_to_page(floor);
- if (is_hugepage_only_range(tlb->mm, ceiling, HPAGE_SIZE))
+ if (REGION_NUMBER(ceiling) == RGN_HPAGE)
ceiling = htlbpage_to_page(ceiling);
free_pgd_range(tlb, addr, end, floor, ceiling);
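
Note on the hugetlb_free_pgd_range() hunk above: the floor/ceiling tests now check the address's region number directly against RGN_HPAGE instead of calling is_hugepage_only_range(). On ia64 the region number is the top three bits of the virtual address; the sketch below assumes that 61-bit shift, and the RGN_HPAGE value used is only a placeholder for illustration (the real one comes from the arch headers):

#include <stdio.h>

#define REGION_NUMBER_SKETCH(addr)	((unsigned long long)(addr) >> 61)
#define RGN_HPAGE_SKETCH		4ULL	/* placeholder, see asm/page.h */

int main(void)
{
	unsigned long long floor = 0x8000000000000000ULL;	/* region-4 address */

	if (REGION_NUMBER_SKETCH(floor) == RGN_HPAGE_SKETCH)
		printf("0x%llx lies in the hugetlb region\n", floor);
	return 0;
}
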
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 08d94e6bfa18..ff4f31fcd330 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -206,7 +206,7 @@ free_initmem (void)
(__init_end - __init_begin) >> 10);
}
-void
+void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
struct page *page;
@@ -261,7 +261,7 @@ free_initrd_mem (unsigned long start, unsigned long end)
/*
* This installs a clean page in the kernel's page table.
*/
-struct page *
+static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
pgd_t *pgd;
@@ -294,7 +294,7 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
return page;
}
-static void
+static void __init
setup_gate (void)
{
struct page *page;
@@ -411,7 +411,7 @@ ia64_mmu_init (void *my_cpu_data)
#ifdef CONFIG_VIRTUAL_MEM_MAP
-int
+int __init
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
unsigned long address, start_page, end_page;
@@ -519,7 +519,7 @@ ia64_pfn_valid (unsigned long pfn)
}
EXPORT_SYMBOL(ia64_pfn_valid);
-int
+int __init
find_largest_hole (u64 start, u64 end, void *arg)
{
u64 *max_gap = arg;
@@ -535,7 +535,7 @@ find_largest_hole (u64 start, u64 end, void *arg)
}
#endif /* CONFIG_VIRTUAL_MEM_MAP */
-static int
+static int __init
count_reserved_pages (u64 start, u64 end, void *arg)
{
unsigned long num_reserved = 0;
@@ -556,7 +556,7 @@ count_reserved_pages (u64 start, u64 end, void *arg)
* purposes.
*/
-static int nolwsys;
+static int nolwsys __initdata;
static int __init
nolwsys_setup (char *s)
@@ -567,7 +567,7 @@ nolwsys_setup (char *s)
__setup("nolwsys", nolwsys_setup);
-void
+void __init
mem_init (void)
{
long reserved_pages, codesize, datasize, initsize;
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index 1f11db470d90..e952ef4f6d91 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -36,7 +36,7 @@ static struct bteinfo_s *bte_if_on_node(nasid_t nasid, int interface)
nodepda_t *tmp_nodepda;
if (nasid_to_cnodeid(nasid) == -1)
- return (struct bteinfo_s *)NULL;;
+ return (struct bteinfo_s *)NULL;
tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid));
return &tmp_nodepda->bte_if[interface];
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index dfb3f2902379..5101ac462643 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -13,6 +13,8 @@
#include <asm/sn/sn_feature_sets.h>
#include <asm/sn/geo.h>
#include <asm/sn/io.h>
+#include <asm/sn/l1.h>
+#include <asm/sn/module.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
@@ -710,9 +712,36 @@ cnodeid_get_geoid(cnodeid_t cnode)
return hubdev->hdi_geoid;
}
+void sn_generate_path(struct pci_bus *pci_bus, char *address)
+{
+ nasid_t nasid;
+ cnodeid_t cnode;
+ geoid_t geoid;
+ moduleid_t moduleid;
+ u16 bricktype;
+
+ nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
+ cnode = nasid_to_cnodeid(nasid);
+ geoid = cnodeid_get_geoid(cnode);
+ moduleid = geo_module(geoid);
+
+ sprintf(address, "module_%c%c%c%c%.2d",
+ '0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)),
+ '0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)),
+ '0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)),
+ MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid));
+
+ /* Tollhouse requires slot id to be displayed */
+ bricktype = MODULE_GET_BTYPE(moduleid);
+ if ((bricktype == L1_BRICKTYPE_191010) ||
+ (bricktype == L1_BRICKTYPE_1932))
+ sprintf(address, "%s^%d", address, geo_slot(geoid));
+}
+
subsys_initcall(sn_pci_init);
EXPORT_SYMBOL(sn_pci_fixup_slot);
EXPORT_SYMBOL(sn_pci_unfixup_slot);
EXPORT_SYMBOL(sn_pci_controller_fixup);
EXPORT_SYMBOL(sn_bus_store_sysdata);
EXPORT_SYMBOL(sn_bus_free_sysdata);
+EXPORT_SYMBOL(sn_generate_path);
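
Note on the new sn_generate_path() above: reading its format string, the path is "module_" followed by the three rack digits, the brick-type character, and a two-digit position, with "^<slot>" appended for the Tollhouse brick types. An illustrative user-space rendering with made-up geo values (the real ones come from the bus's geoid):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char address[64];
	int rack_class = 0, rack_group = 0, rack_num = 1;
	char btchar = 'c';
	int bpos = 5, slot = 2, is_tollhouse = 1;
	size_t len;

	snprintf(address, sizeof(address), "module_%c%c%c%c%.2d",
		 '0' + rack_class, '0' + rack_group, '0' + rack_num,
		 btchar, bpos);
	if (is_tollhouse) {
		len = strlen(address);
		snprintf(address + len, sizeof(address) - len, "^%d", slot);
	}
	printf("%s\n", address);	/* e.g. "module_001c05^2" */
	return 0;
}
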
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index c373113d073a..c265e02f5036 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -350,9 +350,6 @@ static void force_interrupt(int irq)
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
u64 regval;
- int irr_reg_num;
- int irr_bit;
- u64 irr_reg;
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
@@ -373,23 +370,7 @@ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
pdi_pcibus_info;
regval = pcireg_intr_status_get(pcibus_info);
- irr_reg_num = irq_to_vector(irq) / 64;
- irr_bit = irq_to_vector(irq) % 64;
- switch (irr_reg_num) {
- case 0:
- irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
- break;
- case 1:
- irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
- break;
- case 2:
- irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
- break;
- case 3:
- irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
- break;
- }
- if (!test_bit(irr_bit, &irr_reg)) {
+ if (!ia64_get_irr(irq_to_vector(irq))) {
if (!test_bit(irq, pda->sn_in_service_ivecs)) {
regval &= 0xff;
if (sn_irq_info->irq_int_bit & regval &
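
Note on the sn_check_intr() hunk above: the removed switch picked one of the four 64-bit IRR control registers by vector / 64 and tested bit vector % 64; ia64_get_irr() now hides that indexing. A user-space sketch of the mapping the removed code implemented (register contents are stand-ins; only the indexing is illustrated):

#include <stdio.h>
#include <stdint.h>

static int irr_test_sketch(const uint64_t irr[4], unsigned int vector)
{
	unsigned int reg = vector / 64;	/* selects IRR0..IRR3 */
	unsigned int bit = vector % 64;	/* bit within that register */

	return (irr[reg] >> bit) & 1;
}

int main(void)
{
	uint64_t irr[4] = { 0, 1ULL << 5, 0, 0 };

	printf("vector 69 pending: %d\n", irr_test_sketch(irr, 69));
	return 0;
}
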
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index 99cb28e74295..feaf1a6e8101 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -369,9 +369,15 @@ static void tio_corelet_reset(nasid_t nasid, int corelet)
static int is_fpga_tio(int nasid, int *bt)
{
- int ioboard_type;
+ u16 ioboard_type;
+ s64 rc;
- ioboard_type = ia64_sn_sysctl_ioboard_get(nasid);
+ rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard_type);
+ if (rc) {
+ printk(KERN_WARNING "ia64_sn_sysctl_ioboard_get failed: %ld\n",
+ rc);
+ return 0;
+ }
switch (ioboard_type) {
case L1_BRICKTYPE_SA:
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index 98f716bd92f0..ab1211ef0176 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -74,6 +74,22 @@ static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
return (int)ret_stuff.v0;
}
+u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus)
+{
+ s64 rc;
+ u16 ioboard;
+ nasid_t nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);
+
+ rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard);
+ if (rc) {
+ printk(KERN_WARNING "ia64_sn_sysctl_ioboard_get failed: %ld\n",
+ rc);
+ return 0;
+ }
+
+ return ioboard;
+}
+
/*
* PCI Bridge Error interrupt handler. Gets invoked whenever a PCI
* bridge sends an error interrupt.
@@ -255,3 +271,4 @@ pcibr_init_provider(void)
EXPORT_SYMBOL_GPL(sal_pcibr_slot_enable);
EXPORT_SYMBOL_GPL(sal_pcibr_slot_disable);
+EXPORT_SYMBOL_GPL(sn_ioboard_to_pci_bus);
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 7571a4025529..be0176912968 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -377,7 +377,7 @@ tioca_dma_mapped(struct pci_dev *pdev, u64 paddr, size_t req_size)
struct tioca_dmamap *ca_dmamap;
void *map;
unsigned long flags;
- struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);;
+ struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;
diff --git a/arch/m32r/Kconfig.debug b/arch/m32r/Kconfig.debug
index bbf711bab69e..2e1019ddbb22 100644
--- a/arch/m32r/Kconfig.debug
+++ b/arch/m32r/Kconfig.debug
@@ -19,7 +19,7 @@ config DEBUG_STACK_USAGE
This option will slow down process creation somewhat.
config DEBUG_PAGEALLOC
- bool "Page alloc debugging"
+ bool "Debug page memory allocations"
depends on DEBUG_KERNEL && BROKEN
help
Unmap pages from the kernel linear mapping after free_pages().
diff --git a/arch/m32r/Makefile b/arch/m32r/Makefile
index 4b3c90ba926c..f219c47d334f 100644
--- a/arch/m32r/Makefile
+++ b/arch/m32r/Makefile
@@ -1,6 +1,9 @@
#
# m32r/Makefile
#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
LDFLAGS :=
OBJCOPYFLAGS := -O binary -R .note -R .comment -S
@@ -39,7 +42,7 @@ drivers-$(CONFIG_OPROFILE) += arch/m32r/oprofile/
boot := arch/m32r/boot
-.PHONY: zImage
+PHONY += zImage
all: zImage
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 2d8ad0727b6b..33648efb772e 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -77,7 +77,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
/*
* The idle loop on an m68k..
*/
-void default_idle(void)
+static void default_idle(void)
{
if (!need_resched())
#if defined(MACH_ATARI_ONLY) && !defined(CONFIG_HADES)
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c
index 63c117dae0c3..f861755ec88b 100644
--- a/arch/m68knommu/kernel/process.c
+++ b/arch/m68knommu/kernel/process.c
@@ -51,7 +51,7 @@ EXPORT_SYMBOL(pm_power_off);
/*
* The idle loop on an m68knommu..
*/
-void default_idle(void)
+static void default_idle(void)
{
local_irq_disable();
while (!need_resched()) {
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 0fc3730a294f..5407b784cd01 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -645,27 +645,7 @@ static inline void getitimer_real(struct itimerval *value)
asmlinkage unsigned int irix_alarm(unsigned int seconds)
{
- struct itimerval it_new, it_old;
- unsigned int oldalarm;
-
- if (!seconds) {
- getitimer_real(&it_old);
- del_timer(&current->real_timer);
- } else {
- it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
- it_new.it_value.tv_sec = seconds;
- it_new.it_value.tv_usec = 0;
- do_setitimer(ITIMER_REAL, &it_new, &it_old);
- }
- oldalarm = it_old.it_value.tv_sec;
- /*
- * ehhh.. We can't return 0 if we have an alarm pending ...
- * And we'd better return too much than too little anyway
- */
- if (it_old.it_value.tv_usec)
- oldalarm++;
-
- return oldalarm;
+ return alarm_setitimer(seconds);
}
asmlinkage int irix_pause(void)
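[editor's note] The helper called above consolidates the logic that the removed lines spell out. A rough, simplified sketch of the equivalent behaviour (an illustration only, not the actual kernel/itimer.c implementation):

	unsigned int alarm_setitimer_sketch(unsigned int seconds)
	{
		struct itimerval it_new = { }, it_old;

		/* program (or cancel, when seconds == 0) the ITIMER_REAL timer */
		it_new.it_value.tv_sec = seconds;
		do_setitimer(ITIMER_REAL, &it_new, &it_old);

		/* report the previous expiry, rounded up so a pending alarm
		 * never reads back as zero */
		return it_old.it_value.tv_sec + (it_old.it_value.tv_usec ? 1 : 0);
	}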
diff --git a/arch/mips/mm/dma-ip32.c b/arch/mips/mm/dma-ip32.c
index a7e3072ff78d..ec54ed0d26ff 100644
--- a/arch/mips/mm/dma-ip32.c
+++ b/arch/mips/mm/dma-ip32.c
@@ -138,7 +138,7 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
BUG();
}
- addr = virt_to_phys(ptr)&RAM_OFFSET_MASK;;
+ addr = virt_to_phys(ptr)&RAM_OFFSET_MASK;
if(dev == NULL)
addr+=CRIME_HI_MEM_BASE;
return (dma_addr_t)addr;
@@ -179,7 +179,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
addr = (unsigned long) page_address(sg->page)+sg->offset;
if (addr)
__dma_sync(addr, sg->length, direction);
- addr = __pa(addr)&RAM_OFFSET_MASK;;
+ addr = __pa(addr)&RAM_OFFSET_MASK;
if(dev == NULL)
addr += CRIME_HI_MEM_BASE;
sg->dma_address = (dma_addr_t)addr;
@@ -199,7 +199,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
addr = (unsigned long) page_address(page) + offset;
dma_cache_wback_inv(addr, size);
- addr = __pa(addr)&RAM_OFFSET_MASK;;
+ addr = __pa(addr)&RAM_OFFSET_MASK;
if(dev == NULL)
addr += CRIME_HI_MEM_BASE;
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index e8dea4177113..0b485ef4be89 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -54,11 +54,6 @@
#include <asm/uaccess.h>
#include <asm/unwind.h>
-void default_idle(void)
-{
- barrier();
-}
-
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index a3fc7a23158f..829e017b8a54 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -150,7 +150,7 @@ CPPFLAGS_vmlinux.lds := -Upowerpc
BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm uImage vmlinux.bin
-.PHONY: $(BOOT_TARGETS)
+PHONY += $(BOOT_TARGETS)
boot := arch/$(ARCH)/boot
diff --git a/arch/ppc/8xx_io/cs4218_tdm.c b/arch/ppc/8xx_io/cs4218_tdm.c
index 49eb2a7e65c0..a892356d5c3b 100644
--- a/arch/ppc/8xx_io/cs4218_tdm.c
+++ b/arch/ppc/8xx_io/cs4218_tdm.c
@@ -126,11 +126,11 @@ static int numReadBufs = 4, readbufSize = 32;
*/
static volatile cbd_t *rx_base, *rx_cur, *tx_base, *tx_cur;
-MODULE_PARM(catchRadius, "i");
-MODULE_PARM(numBufs, "i");
-MODULE_PARM(bufSize, "i");
-MODULE_PARM(numreadBufs, "i");
-MODULE_PARM(readbufSize, "i");
+module_param(catchRadius, int, 0);
+module_param(numBufs, int, 0);
+module_param(bufSize, int, 0);
+module_param(numreadBufs, int, 0);
+module_param(readbufSize, int, 0);
#define arraysize(x) (sizeof(x)/sizeof(*(x)))
#define le2be16(x) (((x)<<8 & 0xff00) | ((x)>>8 & 0x00ff))
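[editor's note] The conversion above swaps the old MODULE_PARM() macro for module_param(name, type, perm); the third argument is the sysfs permission mask. A minimal stand-alone sketch with a hypothetical parameter (not part of this driver):

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static int catch_radius = 12;           /* hypothetical tunable */
	module_param(catch_radius, int, 0444);  /* 0444: world-readable in sysfs */
	MODULE_PARM_DESC(catch_radius, "illustrative description of the knob");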
diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile
index 98e940beeb3b..9fbdf54ba2be 100644
--- a/arch/ppc/Makefile
+++ b/arch/ppc/Makefile
@@ -82,7 +82,7 @@ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm
-.PHONY: $(BOOT_TARGETS)
+PHONY += $(BOOT_TARGETS)
all: uImage zImage
diff --git a/arch/ppc/boot/Makefile b/arch/ppc/boot/Makefile
index f565699a9fe0..84eec0bef93c 100644
--- a/arch/ppc/boot/Makefile
+++ b/arch/ppc/boot/Makefile
@@ -1,3 +1,9 @@
+#
+# arch/ppc/boot/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
@@ -22,7 +28,7 @@ subdir- += simple openfirmware
hostprogs-y := $(addprefix utils/, addnote mknote hack-coff mkprep mkbugboot mktree)
-.PHONY: $(BOOT_TARGETS) $(bootdir-y)
+PHONY += $(BOOT_TARGETS) $(bootdir-y)
$(BOOT_TARGETS): $(bootdir-y)
diff --git a/arch/ppc/boot/openfirmware/Makefile b/arch/ppc/boot/openfirmware/Makefile
index 2a411ec2e650..66b739743759 100644
--- a/arch/ppc/boot/openfirmware/Makefile
+++ b/arch/ppc/boot/openfirmware/Makefile
@@ -1,5 +1,8 @@
# Makefile for making bootable images on various OpenFirmware machines.
#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
# Paul Mackerras January 1997
# XCOFF bootable images for PowerMacs
# Geert Uytterhoeven September 1997
@@ -86,7 +89,7 @@ $(images)/zImage.chrp-rs6k $(images)/zImage.initrd.chrp-rs6k: \
# The targets used on the make command-line
-.PHONY: zImage zImage.initrd
+PHONY += zImage zImage.initrd
zImage: $(images)/zImage.chrp \
$(images)/zImage.chrp-rs6k
@echo ' kernel: $@ is ready ($<)'
@@ -96,7 +99,7 @@ zImage.initrd: $(images)/zImage.initrd.chrp \
TFTPIMAGE := /tftpboot/zImage
-.PHONY: znetboot znetboot.initrd
+PHONY += znetboot znetboot.initrd
znetboot: $(images)/zImage.chrp
cp $(images)/zImage.chrp $(TFTPIMAGE).chrp$(END)
@echo ' kernel: $@ is ready ($<)'
diff --git a/arch/ppc/syslib/ppc85xx_setup.c b/arch/ppc/syslib/ppc85xx_setup.c
index e70b34ee6275..79b7089d7500 100644
--- a/arch/ppc/syslib/ppc85xx_setup.c
+++ b/arch/ppc/syslib/ppc85xx_setup.c
@@ -235,7 +235,7 @@ mpc85xx_setup_pci2(struct pci_controller *hose)
(__ilog2(MPC85XX_PCI2_UPPER_MEM - MPC85XX_PCI2_LOWER_MEM + 1) - 1);
/* Setup outbound IO windows @ MPC85XX_PCI2_IO_BASE */
- pci->potar2 = (MPC85XX_PCI2_LOWER_IO >> 12) & 0x000fffff;;
+ pci->potar2 = (MPC85XX_PCI2_LOWER_IO >> 12) & 0x000fffff;
pci->potear2 = 0x00000000;
pci->powbar2 = (MPC85XX_PCI2_IO_BASE >> 12) & 0x000fffff;
/* Enable, IO R/W */
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b7ca5bf9acfc..2b7364ed23bc 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -460,6 +460,8 @@ config PCMCIA
source "drivers/base/Kconfig"
+source "drivers/connector/Kconfig"
+
source "drivers/scsi/Kconfig"
source "drivers/s390/Kconfig"
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index d06a8d71c71d..54d35c130907 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -531,12 +531,11 @@ int appldata_register_ops(struct appldata_ops *ops)
P_ERROR("ctl_nr %i already in use!\n", ops->ctl_nr);
return -EBUSY;
}
- ops->ctl_table = kmalloc(4*sizeof(struct ctl_table), GFP_KERNEL);
+ ops->ctl_table = kzalloc(4*sizeof(struct ctl_table), GFP_KERNEL);
if (ops->ctl_table == NULL) {
P_ERROR("Not enough memory for %s ctl_table!\n", ops->name);
return -ENOMEM;
}
- memset(ops->ctl_table, 0, 4*sizeof(struct ctl_table));
spin_lock(&appldata_ops_lock);
list_for_each(lh, &appldata_ops_list) {
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 896d39d0e4ce..06a3fbc12536 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -204,16 +204,13 @@ debug_areas_alloc(int pages_per_area, int nr_areas)
goto fail_malloc_areas2;
}
for(j = 0; j < pages_per_area; j++) {
- areas[i][j] = (debug_entry_t*)kmalloc(PAGE_SIZE,
- GFP_KERNEL);
+ areas[i][j] = kzalloc(PAGE_SIZE, GFP_KERNEL);
if(!areas[i][j]) {
for(j--; j >=0 ; j--) {
kfree(areas[i][j]);
}
kfree(areas[i]);
goto fail_malloc_areas2;
- } else {
- memset(areas[i][j],0,PAGE_SIZE);
}
}
}
@@ -249,14 +246,12 @@ debug_info_alloc(char *name, int pages_per_area, int nr_areas, int buf_size,
rc = (debug_info_t*) kmalloc(sizeof(debug_info_t), GFP_KERNEL);
if(!rc)
goto fail_malloc_rc;
- rc->active_entries = (int*)kmalloc(nr_areas * sizeof(int), GFP_KERNEL);
+ rc->active_entries = kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
if(!rc->active_entries)
goto fail_malloc_active_entries;
- memset(rc->active_entries, 0, nr_areas * sizeof(int));
- rc->active_pages = (int*)kmalloc(nr_areas * sizeof(int), GFP_KERNEL);
+ rc->active_pages = kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
if(!rc->active_pages)
goto fail_malloc_active_pages;
- memset(rc->active_pages, 0, nr_areas * sizeof(int));
if((mode == ALL_AREAS) && (pages_per_area != 0)){
rc->areas = debug_areas_alloc(pages_per_area, nr_areas);
if(!rc->areas)
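[editor's note] The two files above follow the same pattern: kzalloc(size, flags) replaces kmalloc() plus memset(..., 0, size), and kcalloc(n, size, flags) additionally checks the n * size multiplication for overflow. A minimal sketch of the idiom (illustrative helper, not from this patch):

	static int *alloc_zeroed_counters(int nr_areas)
	{
		/* zeroed array of nr_areas ints; no explicit memset() needed */
		return kcalloc(nr_areas, sizeof(int), GFP_KERNEL);
	}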
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index da6fbae8df91..99182a415fe7 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -103,7 +103,7 @@ extern void s390_handle_mcck(void);
/*
* The idle loop on a S390...
*/
-void default_idle(void)
+static void default_idle(void)
{
int cpu, rc;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 24f62f16c0e5..0a04e4a564b2 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -78,8 +78,6 @@ extern int _text,_etext, _edata, _end;
#include <asm/setup.h>
-static char command_line[COMMAND_LINE_SIZE] = { 0, };
-
static struct resource code_resource = {
.name = "Kernel code",
.start = (unsigned long) &_text,
@@ -335,63 +333,38 @@ add_memory_hole(unsigned long start, unsigned long end)
}
}
-static void __init
-parse_cmdline_early(char **cmdline_p)
+static int __init early_parse_mem(char *p)
+{
+ memory_end = memparse(p, &p);
+ return 0;
+}
+early_param("mem", early_parse_mem);
+
+/*
+ * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
+ */
+static int __init early_parse_ipldelay(char *p)
{
- char c = ' ', cn, *to = command_line, *from = COMMAND_LINE;
unsigned long delay = 0;
- /* Save unparsed command line copy for /proc/cmdline */
- memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
- saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+ delay = simple_strtoul(p, &p, 0);
- for (;;) {
- /*
- * "mem=XXX[kKmM]" sets memsize
- */
- if (c == ' ' && strncmp(from, "mem=", 4) == 0) {
- memory_end = simple_strtoul(from+4, &from, 0);
- if ( *from == 'K' || *from == 'k' ) {
- memory_end = memory_end << 10;
- from++;
- } else if ( *from == 'M' || *from == 'm' ) {
- memory_end = memory_end << 20;
- from++;
- }
- }
- /*
- * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
- */
- if (c == ' ' && strncmp(from, "ipldelay=", 9) == 0) {
- delay = simple_strtoul(from+9, &from, 0);
- if (*from == 's' || *from == 'S') {
- delay = delay*1000000;
- from++;
- } else if (*from == 'm' || *from == 'M') {
- delay = delay*60*1000000;
- from++;
- }
- /* now wait for the requested amount of time */
- udelay(delay);
- }
- cn = *(from++);
- if (!cn)
- break;
- if (cn == '\n')
- cn = ' '; /* replace newlines with space */
- if (cn == 0x0d)
- cn = ' '; /* replace 0x0d with space */
- if (cn == ' ' && c == ' ')
- continue; /* remove additional spaces */
- c = cn;
- if (to - command_line >= COMMAND_LINE_SIZE)
- break;
- *(to++) = c;
+ switch (*p) {
+ case 's':
+ case 'S':
+ delay *= 1000000;
+ break;
+ case 'm':
+ case 'M':
+ delay *= 60 * 1000000;
}
- if (c == ' ' && to > command_line) to--;
- *to = '\0';
- *cmdline_p = command_line;
+
+ /* now wait for the requested amount of time */
+ udelay(delay);
+
+ return 0;
}
+early_param("ipldelay", early_parse_ipldelay);
static void __init
setup_lowcore(void)
@@ -580,9 +553,26 @@ setup_arch(char **cmdline_p)
"We are running native (64 bit mode)\n");
#endif /* CONFIG_64BIT */
+ /* Save unparsed command line copy for /proc/cmdline */
+ strlcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
+
+ *cmdline_p = COMMAND_LINE;
+ *(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0';
+
ROOT_DEV = Root_RAM0;
+
+ init_mm.start_code = PAGE_OFFSET;
+ init_mm.end_code = (unsigned long) &_etext;
+ init_mm.end_data = (unsigned long) &_edata;
+ init_mm.brk = (unsigned long) &_end;
+
+ memory_end = memory_size;
+
+ parse_early_param();
+
#ifndef CONFIG_64BIT
- memory_end = memory_size & ~0x400000UL; /* align memory end to 4MB */
+ memory_end &= ~0x400000UL;
+
/*
* We need some free virtual space to be able to do vmalloc.
* On a machine with 2GB memory we make sure that we have at
@@ -591,17 +581,9 @@ setup_arch(char **cmdline_p)
if (memory_end > 1920*1024*1024)
memory_end = 1920*1024*1024;
#else /* CONFIG_64BIT */
- memory_end = memory_size & ~0x200000UL; /* detected in head.s */
+ memory_end &= ~0x200000UL;
#endif /* CONFIG_64BIT */
- init_mm.start_code = PAGE_OFFSET;
- init_mm.end_code = (unsigned long) &_etext;
- init_mm.end_data = (unsigned long) &_edata;
- init_mm.brk = (unsigned long) &_end;
-
- parse_cmdline_early(cmdline_p);
- parse_early_param();
-
setup_memory();
setup_resources();
setup_lowcore();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index d52d6d211d9f..2b8841f85534 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -665,7 +665,9 @@ __cpu_up(unsigned int cpu)
cpu_lowcore->current_task = (unsigned long) idle;
cpu_lowcore->cpu_data.cpu_nr = cpu;
eieio();
- signal_processor(cpu,sigp_restart);
+
+ while (signal_processor(cpu,sigp_restart) == sigp_busy)
+ udelay(10);
while (!cpu_online(cpu))
cpu_relax();
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index b075ab499d05..51596f429235 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -339,19 +339,19 @@ static struct ctl_table cmm_table[] = {
{
.ctl_name = VM_CMM_PAGES,
.procname = "cmm_pages",
- .mode = 0600,
+ .mode = 0644,
.proc_handler = &cmm_pages_handler,
},
{
.ctl_name = VM_CMM_TIMED_PAGES,
.procname = "cmm_timed_pages",
- .mode = 0600,
+ .mode = 0644,
.proc_handler = &cmm_pages_handler,
},
{
.ctl_name = VM_CMM_TIMEOUT,
.procname = "cmm_timeout",
- .mode = 0600,
+ .mode = 0644,
.proc_handler = &cmm_timeout_handler,
},
{ .ctl_name = 0 }
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 08c9515c4806..c72e17a96eed 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -172,7 +172,7 @@ include/asm-sh/.mach: $(wildcard include/config/sh/*.h) include/config/MARKER
archprepare: maketools include/asm-sh/.cpu include/asm-sh/.mach
-.PHONY: maketools FORCE
+PHONY += maketools FORCE
maketools: include/linux/version.h FORCE
$(Q)$(MAKE) $(build)=arch/sh/tools include/asm-sh/machtypes.h
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 9fd1723e6219..22dc9c21201d 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -19,7 +19,6 @@
#include <linux/slab.h>
#include <linux/pm.h>
#include <linux/ptrace.h>
-#include <linux/platform.h>
#include <linux/kallsyms.h>
#include <linux/kexec.h>
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index f944b58cdfe7..7c58fc1a39c4 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -23,7 +23,6 @@ menu "General machine setup"
config SMP
bool "Symmetric multi-processing support (does not work on sun4/sun4c)"
- depends on BROKEN
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, say N. If you have a system with more
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index 4c60a6ef54a9..aac8af5aae51 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -154,9 +154,11 @@ void (*sparc_init_timers)(irqreturn_t (*)(int, void *,struct pt_regs *)) =
struct irqaction static_irqaction[MAX_STATIC_ALLOC];
int static_irq_count;
-struct irqaction *irq_action[NR_IRQS] = {
- [0 ... (NR_IRQS-1)] = NULL
-};
+struct {
+ struct irqaction *action;
+ int flags;
+} sparc_irq[NR_IRQS];
+#define SPARC_IRQ_INPROGRESS 1
/* Used to protect the IRQ action lists */
DEFINE_SPINLOCK(irq_action_lock);
@@ -177,7 +179,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
spin_lock_irqsave(&irq_action_lock, flags);
if (i < NR_IRQS) {
- action = *(i + irq_action);
+ action = sparc_irq[i].action;
if (!action)
goto out_unlock;
seq_printf(p, "%3d: ", i);
@@ -186,7 +188,7 @@ int show_interrupts(struct seq_file *p, void *v)
#else
for_each_online_cpu(j) {
seq_printf(p, "%10u ",
- kstat_cpu(cpu_logical_map(j)).irqs[i]);
+ kstat_cpu(j).irqs[i]);
}
#endif
seq_printf(p, " %c %s",
@@ -207,7 +209,7 @@ out_unlock:
void free_irq(unsigned int irq, void *dev_id)
{
struct irqaction * action;
- struct irqaction * tmp = NULL;
+ struct irqaction **actionp;
unsigned long flags;
unsigned int cpu_irq;
@@ -225,7 +227,8 @@ void free_irq(unsigned int irq, void *dev_id)
spin_lock_irqsave(&irq_action_lock, flags);
- action = *(cpu_irq + irq_action);
+ actionp = &sparc_irq[cpu_irq].action;
+ action = *actionp;
if (!action->handler) {
printk("Trying to free free IRQ%d\n",irq);
@@ -235,7 +238,7 @@ void free_irq(unsigned int irq, void *dev_id)
for (; action; action = action->next) {
if (action->dev_id == dev_id)
break;
- tmp = action;
+ actionp = &action->next;
}
if (!action) {
printk("Trying to free free shared IRQ%d\n",irq);
@@ -254,11 +257,8 @@ void free_irq(unsigned int irq, void *dev_id)
irq, action->name);
goto out_unlock;
}
-
- if (action && tmp)
- tmp->next = action->next;
- else
- *(cpu_irq + irq_action) = action->next;
+
+ *actionp = action->next;
spin_unlock_irqrestore(&irq_action_lock, flags);
@@ -268,7 +268,7 @@ void free_irq(unsigned int irq, void *dev_id)
kfree(action);
- if (!(*(cpu_irq + irq_action)))
+ if (!sparc_irq[cpu_irq].action)
disable_irq(irq);
out_unlock:
@@ -287,8 +287,11 @@ EXPORT_SYMBOL(free_irq);
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
- printk("synchronize_irq says: implement me!\n");
- BUG();
+ unsigned int cpu_irq;
+
+ cpu_irq = irq & (NR_IRQS - 1);
+ while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
+ cpu_relax();
}
#endif /* SMP */
@@ -299,7 +302,7 @@ void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
unsigned int cpu_irq;
cpu_irq = irq & (NR_IRQS - 1);
- action = *(cpu_irq + irq_action);
+ action = sparc_irq[cpu_irq].action;
printk("IO device interrupt, irq = %d\n", irq);
printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
@@ -330,7 +333,8 @@ void handler_irq(int irq, struct pt_regs * regs)
if(irq < 10)
smp4m_irq_rotate(cpu);
#endif
- action = *(irq + irq_action);
+ action = sparc_irq[irq].action;
+ sparc_irq[irq].flags |= SPARC_IRQ_INPROGRESS;
kstat_cpu(cpu).irqs[irq]++;
do {
if (!action || !action->handler)
@@ -338,6 +342,7 @@ void handler_irq(int irq, struct pt_regs * regs)
action->handler(irq, action->dev_id, regs);
action = action->next;
} while (action);
+ sparc_irq[irq].flags &= ~SPARC_IRQ_INPROGRESS;
enable_pil_irq(irq);
irq_exit();
}
@@ -389,7 +394,7 @@ int request_fast_irq(unsigned int irq,
spin_lock_irqsave(&irq_action_lock, flags);
- action = *(cpu_irq + irq_action);
+ action = sparc_irq[cpu_irq].action;
if(action) {
if(action->flags & SA_SHIRQ)
panic("Trying to register fast irq when already shared.\n");
@@ -452,7 +457,7 @@ int request_fast_irq(unsigned int irq,
action->dev_id = NULL;
action->next = NULL;
- *(cpu_irq + irq_action) = action;
+ sparc_irq[cpu_irq].action = action;
enable_irq(irq);
@@ -467,7 +472,7 @@ int request_irq(unsigned int irq,
irqreturn_t (*handler)(int, void *, struct pt_regs *),
unsigned long irqflags, const char * devname, void *dev_id)
{
- struct irqaction * action, *tmp = NULL;
+ struct irqaction * action, **actionp;
unsigned long flags;
unsigned int cpu_irq;
int ret;
@@ -490,20 +495,20 @@ int request_irq(unsigned int irq,
spin_lock_irqsave(&irq_action_lock, flags);
- action = *(cpu_irq + irq_action);
+ actionp = &sparc_irq[cpu_irq].action;
+ action = *actionp;
if (action) {
- if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
- for (tmp = action; tmp->next; tmp = tmp->next);
- } else {
+ if (!(action->flags & SA_SHIRQ) || !(irqflags & SA_SHIRQ)) {
ret = -EBUSY;
goto out_unlock;
}
- if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
+ if ((action->flags & SA_INTERRUPT) != (irqflags & SA_INTERRUPT)) {
printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
ret = -EBUSY;
goto out_unlock;
- }
- action = NULL; /* Or else! */
+ }
+ for ( ; action; action = *actionp)
+ actionp = &action->next;
}
/* If this is flagged as statically allocated then we use our
@@ -532,10 +537,7 @@ int request_irq(unsigned int irq,
action->next = NULL;
action->dev_id = dev_id;
- if (tmp)
- tmp->next = action;
- else
- *(cpu_irq + irq_action) = action;
+ *actionp = action;
enable_irq(irq);
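[editor's note] The free_irq()/request_irq() rework above replaces the 'tmp' back-pointer bookkeeping with the pointer-to-pointer idiom: actionp always points at the link that refers to the current node, so unlinking or appending needs no head/middle special case. A generic sketch of the same idiom (illustrative types, not kernel code):

	struct node { struct node *next; int key; };

	static void unlink_key(struct node **head, int key)
	{
		struct node **linkp = head;
		struct node *n;

		for (n = *head; n; linkp = &n->next, n = n->next) {
			if (n->key == key) {
				*linkp = n->next;   /* works for head and middle alike */
				break;
			}
		}
	}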
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index ea5682ce7031..2be812115197 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -45,6 +45,7 @@ volatile int __cpu_logical_map[NR_CPUS];
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
+cpumask_t smp_commenced_mask = CPU_MASK_NONE;
/* The only guaranteed locking primitive available on all Sparc
* processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
@@ -57,11 +58,6 @@ cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
/* Used to make bitops atomic */
unsigned char bitops_spinlock = 0;
-volatile unsigned long ipi_count;
-
-volatile int smp_process_available=0;
-volatile int smp_commenced = 0;
-
void __init smp_store_cpu_info(int id)
{
int cpu_node;
@@ -79,6 +75,22 @@ void __init smp_store_cpu_info(int id)
void __init smp_cpus_done(unsigned int max_cpus)
{
+ extern void smp4m_smp_done(void);
+ unsigned long bogosum = 0;
+ int cpu, num;
+
+ for (cpu = 0, num = 0; cpu < NR_CPUS; cpu++)
+ if (cpu_online(cpu)) {
+ num++;
+ bogosum += cpu_data(cpu).udelay_val;
+ }
+
+ printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+ num, bogosum/(500000/HZ),
+ (bogosum/(5000/HZ))%100);
+
+ BUG_ON(sparc_cpu_model != sun4m);
+ smp4m_smp_done();
}
void cpu_panic(void)
@@ -89,17 +101,6 @@ void cpu_panic(void)
struct linux_prom_registers smp_penguin_ctable __initdata = { 0 };
-void __init smp_boot_cpus(void)
-{
- extern void smp4m_boot_cpus(void);
- extern void smp4d_boot_cpus(void);
-
- if (sparc_cpu_model == sun4m)
- smp4m_boot_cpus();
- else
- smp4d_boot_cpus();
-}
-
void smp_send_reschedule(int cpu)
{
/* See sparc64 */
@@ -252,20 +253,61 @@ int setup_profiling_timer(unsigned int multiplier)
return 0;
}
-void __init smp_prepare_cpus(unsigned int maxcpus)
+void __init smp_prepare_cpus(unsigned int max_cpus)
{
+ extern void smp4m_boot_cpus(void);
+ int i, cpuid, ncpus, extra;
+
+ BUG_ON(sparc_cpu_model != sun4m);
+ printk("Entering SMP Mode...\n");
+
+ ncpus = 1;
+ extra = 0;
+ for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
+ if (cpuid == boot_cpu_id)
+ continue;
+ if (cpuid < NR_CPUS && ncpus++ < max_cpus)
+ cpu_set(cpuid, phys_cpu_present_map);
+ else
+ extra++;
+ }
+ if (max_cpus >= NR_CPUS && extra)
+ printk("Warning: NR_CPUS is too low to start all cpus\n");
+
+ smp_store_cpu_info(boot_cpu_id);
+
+ smp4m_boot_cpus();
}
void __devinit smp_prepare_boot_cpu(void)
{
- current_thread_info()->cpu = hard_smp_processor_id();
- cpu_set(smp_processor_id(), cpu_online_map);
- cpu_set(smp_processor_id(), phys_cpu_present_map);
+ int cpuid = hard_smp_processor_id();
+
+ if (cpuid >= NR_CPUS) {
+ prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
+ prom_halt();
+ }
+ if (cpuid != 0)
+ printk("boot cpu id != 0, this could work but is untested\n");
+
+ current_thread_info()->cpu = cpuid;
+ cpu_set(cpuid, cpu_online_map);
+ cpu_set(cpuid, phys_cpu_present_map);
}
int __devinit __cpu_up(unsigned int cpu)
{
- panic("smp doesn't work\n");
+ extern int smp4m_boot_one_cpu(int);
+ int ret;
+
+ ret = smp4m_boot_one_cpu(cpu);
+
+ if (!ret) {
+ cpu_set(cpu, smp_commenced_mask);
+ while (!cpu_online(cpu))
+ mb();
+ }
+ return ret;
}
void smp_bogo(struct seq_file *m)
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 19b25399d7e4..2c21d7907635 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -136,10 +136,6 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data);
/* IRQ implementation. */
EXPORT_SYMBOL(synchronize_irq);
-/* Misc SMP information */
-EXPORT_SYMBOL(__cpu_number_map);
-EXPORT_SYMBOL(__cpu_logical_map);
-
/* CPU online map and active count. */
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(phys_cpu_present_map);
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index cea7fc6fc6e5..ca656d9bd6fd 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -54,7 +54,7 @@ unsigned char cpu_leds[32];
unsigned char sbus_tid[32];
#endif
-extern struct irqaction *irq_action[];
+static struct irqaction *irq_action[NR_IRQS];
extern spinlock_t irq_action_lock;
struct sbus_action {
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 41bb9596be48..b141b7ee6717 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -46,14 +46,16 @@ extern volatile int smp_processors_ready;
extern int smp_num_cpus;
static int smp_highest_cpu;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
-extern struct cpuinfo_sparc cpu_data[NR_CPUS];
+extern cpuinfo_sparc cpu_data[NR_CPUS];
extern unsigned char boot_cpu_id;
extern int smp_activated;
extern volatile int __cpu_number_map[NR_CPUS];
extern volatile int __cpu_logical_map[NR_CPUS];
extern volatile unsigned long ipi_count;
extern volatile int smp_process_available;
-extern volatile int smp_commenced;
+
+extern cpumask_t smp_commenced_mask;
+
extern int __smp4d_processor_id(void);
/* #define SMP_DEBUG */
@@ -136,7 +138,7 @@ void __init smp4d_callin(void)
local_irq_enable(); /* We don't allow PIL 14 yet */
- while(!smp_commenced)
+ while (!cpu_isset(cpuid, smp_commenced_mask))
barrier();
spin_lock_irqsave(&sun4d_imsk_lock, flags);
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 1dde312eebda..70b375a4c2c2 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -40,15 +40,11 @@ extern ctxd_t *srmmu_ctx_table_phys;
extern void calibrate_delay(void);
extern volatile int smp_processors_ready;
-extern int smp_num_cpus;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern unsigned char boot_cpu_id;
-extern int smp_activated;
-extern volatile int __cpu_number_map[NR_CPUS];
-extern volatile int __cpu_logical_map[NR_CPUS];
-extern volatile unsigned long ipi_count;
-extern volatile int smp_process_available;
-extern volatile int smp_commenced;
+
+extern cpumask_t smp_commenced_mask;
+
extern int __smp4m_processor_id(void);
/*#define SMP_DEBUG*/
@@ -77,8 +73,6 @@ void __init smp4m_callin(void)
local_flush_cache_all();
local_flush_tlb_all();
- set_irq_udt(boot_cpu_id);
-
/* Get our local ticker going. */
smp_setup_percpu_timer();
@@ -95,8 +89,9 @@ void __init smp4m_callin(void)
* to call the scheduler code.
*/
/* Allow master to continue. */
- swap((unsigned long *)&cpu_callin_map[cpuid], 1);
+ swap(&cpu_callin_map[cpuid], 1);
+ /* XXX: What's up with all the flushes? */
local_flush_cache_all();
local_flush_tlb_all();
@@ -111,13 +106,14 @@ void __init smp4m_callin(void)
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
- while(!smp_commenced)
- barrier();
-
- local_flush_cache_all();
- local_flush_tlb_all();
+ while (!cpu_isset(cpuid, smp_commenced_mask))
+ mb();
local_irq_enable();
+
+ cpu_set(cpuid, cpu_online_map);
+ /* last one in gets all the interrupts (for testing) */
+ set_irq_udt(boot_cpu_id);
}
extern void init_IRQ(void);
@@ -134,102 +130,76 @@ extern unsigned long trapbase_cpu3[];
void __init smp4m_boot_cpus(void)
{
- int cpucount = 0;
- int i, mid;
+ smp_setup_percpu_timer();
+ local_flush_cache_all();
+}
- printk("Entering SMP Mode...\n");
+int smp4m_boot_one_cpu(int i)
+{
+ extern unsigned long sun4m_cpu_startup;
+ unsigned long *entry = &sun4m_cpu_startup;
+ struct task_struct *p;
+ int timeout;
+ int cpu_node;
- local_irq_enable();
- cpus_clear(cpu_present_map);
+ cpu_find_by_mid(i, &cpu_node);
+
+ /* Cook up an idler for this guy. */
+ p = fork_idle(i);
+ current_set[i] = task_thread_info(p);
+ /* See trampoline.S for details... */
+ entry += ((i-1) * 3);
- for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
- cpu_set(mid, cpu_present_map);
+ /*
+ * Initialize the contexts table
+ * Since the call to prom_startcpu() trashes the structure,
+ * we need to re-initialize it for each cpu
+ */
+ smp_penguin_ctable.which_io = 0;
+ smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
+ smp_penguin_ctable.reg_size = 0;
- for(i=0; i < NR_CPUS; i++) {
- __cpu_number_map[i] = -1;
- __cpu_logical_map[i] = -1;
+ /* whirrr, whirrr, whirrrrrrrrr... */
+ printk("Starting CPU %d at %p\n", i, entry);
+ local_flush_cache_all();
+ prom_startcpu(cpu_node,
+ &smp_penguin_ctable, 0, (char *)entry);
+
+ /* wheee... it's going... */
+ for(timeout = 0; timeout < 10000; timeout++) {
+ if(cpu_callin_map[i])
+ break;
+ udelay(200);
}
- __cpu_number_map[boot_cpu_id] = 0;
- __cpu_logical_map[0] = boot_cpu_id;
- current_thread_info()->cpu = boot_cpu_id;
+ if (!(cpu_callin_map[i])) {
+ printk("Processor %d is stuck.\n", i);
+ return -ENODEV;
+ }
- smp_store_cpu_info(boot_cpu_id);
- set_irq_udt(boot_cpu_id);
- smp_setup_percpu_timer();
local_flush_cache_all();
- if(cpu_find_by_instance(1, NULL, NULL))
- return; /* Not an MP box. */
- for(i = 0; i < NR_CPUS; i++) {
- if(i == boot_cpu_id)
- continue;
-
- if (cpu_isset(i, cpu_present_map)) {
- extern unsigned long sun4m_cpu_startup;
- unsigned long *entry = &sun4m_cpu_startup;
- struct task_struct *p;
- int timeout;
-
- /* Cook up an idler for this guy. */
- p = fork_idle(i);
- cpucount++;
- current_set[i] = task_thread_info(p);
- /* See trampoline.S for details... */
- entry += ((i-1) * 3);
-
- /*
- * Initialize the contexts table
- * Since the call to prom_startcpu() trashes the structure,
- * we need to re-initialize it for each cpu
- */
- smp_penguin_ctable.which_io = 0;
- smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
- smp_penguin_ctable.reg_size = 0;
-
- /* whirrr, whirrr, whirrrrrrrrr... */
- printk("Starting CPU %d at %p\n", i, entry);
- local_flush_cache_all();
- prom_startcpu(cpu_data(i).prom_node,
- &smp_penguin_ctable, 0, (char *)entry);
-
- /* wheee... it's going... */
- for(timeout = 0; timeout < 10000; timeout++) {
- if(cpu_callin_map[i])
- break;
- udelay(200);
- }
- if(cpu_callin_map[i]) {
- /* Another "Red Snapper". */
- __cpu_number_map[i] = i;
- __cpu_logical_map[i] = i;
- } else {
- cpucount--;
- printk("Processor %d is stuck.\n", i);
- }
- }
- if(!(cpu_callin_map[i])) {
- cpu_clear(i, cpu_present_map);
- __cpu_number_map[i] = -1;
+ return 0;
+}
+
+void __init smp4m_smp_done(void)
+{
+ int i, first;
+ int *prev;
+
+ /* setup cpu list for irq rotation */
+ first = 0;
+ prev = &first;
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_online(i)) {
+ *prev = i;
+ prev = &cpu_data(i).next;
}
}
+ *prev = first;
local_flush_cache_all();
- if(cpucount == 0) {
- printk("Error: only one Processor found.\n");
- cpu_present_map = cpumask_of_cpu(smp_processor_id());
- } else {
- unsigned long bogosum = 0;
- for_each_present_cpu(i)
- bogosum += cpu_data(i).udelay_val;
- printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
- cpucount + 1,
- bogosum/(500000/HZ),
- (bogosum/(5000/HZ))%100);
- smp_activated = 1;
- smp_num_cpus = cpucount + 1;
- }
/* Free unneeded trap tables */
- if (!cpu_isset(i, cpu_present_map)) {
+ if (!cpu_isset(1, cpu_present_map)) {
ClearPageReserved(virt_to_page(trapbase_cpu1));
init_page_count(virt_to_page(trapbase_cpu1));
free_page((unsigned long)trapbase_cpu1);
@@ -263,6 +233,9 @@ void __init smp4m_boot_cpus(void)
*/
void smp4m_irq_rotate(int cpu)
{
+ int next = cpu_data(cpu).next;
+ if (next != cpu)
+ set_irq_udt(next);
}
/* Cross calls, in order to work efficiently and atomically do all
@@ -289,7 +262,7 @@ void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
smp_cpu_in_msg[me]++;
if(target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
- mask = cpu_present_map;
+ mask = cpu_online_map;
if(target == MSG_ALL_BUT_SELF)
cpu_clear(me, mask);
for(i = 0; i < 4; i++) {
@@ -314,8 +287,8 @@ static struct smp_funcall {
unsigned long arg3;
unsigned long arg4;
unsigned long arg5;
- unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */
- unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
+ unsigned long processors_in[SUN4M_NCPUS]; /* Set when ipi entered. */
+ unsigned long processors_out[SUN4M_NCPUS]; /* Set when ipi exited. */
} ccall_info;
static DEFINE_SPINLOCK(cross_call_lock);
@@ -324,8 +297,7 @@ static DEFINE_SPINLOCK(cross_call_lock);
void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
- if(smp_processors_ready) {
- register int ncpus = smp_num_cpus;
+ register int ncpus = SUN4M_NCPUS;
unsigned long flags;
spin_lock_irqsave(&cross_call_lock, flags);
@@ -340,7 +312,7 @@ void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
/* Init receive/complete mapping, plus fire the IPI's off. */
{
- cpumask_t mask = cpu_present_map;
+ cpumask_t mask = cpu_online_map;
register int i;
cpu_clear(smp_processor_id(), mask);
@@ -373,7 +345,6 @@ void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
}
spin_unlock_irqrestore(&cross_call_lock, flags);
- }
}
/* Running cross calls. */
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 27b0e0ba8581..58c65cc8d0d3 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1302,7 +1302,12 @@ void __init srmmu_paging_init(void)
flush_cache_all();
srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
+#ifdef CONFIG_SMP
+ /* Stop from hanging here... */
+ local_flush_tlb_all();
+#else
flush_tlb_all();
+#endif
poke_srmmu();
#ifdef CONFIG_SUN_IO
@@ -1419,6 +1424,7 @@ static void __init init_vac_layout(void)
max_size = vac_cache_size;
if(vac_line_size < min_line_size)
min_line_size = vac_line_size;
+ //FIXME: cpus not contiguous!!
cpu++;
if (cpu >= NR_CPUS || !cpu_online(cpu))
break;
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index 3e31be494e54..afe0a7720a26 100644
--- a/arch/sparc64/Kconfig.debug
+++ b/arch/sparc64/Kconfig.debug
@@ -24,7 +24,7 @@ config DEBUG_BOOTMEM
bool "Debug BOOTMEM initialization"
config DEBUG_PAGEALLOC
- bool "Page alloc debugging"
+ bool "Debug page memory allocations"
depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND
help
Unmap pages from the kernel linear mapping after free_pages().
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index e505a4125e35..11e645c9ec50 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -727,7 +727,7 @@ void handler_irq(int irq, struct pt_regs *regs)
}
#ifdef CONFIG_BLK_DEV_FD
-extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);;
+extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);
/* XXX No easy way to include asm/floppy.h XXX */
extern unsigned char *pdma_vaddr;
diff --git a/arch/um/Makefile b/arch/um/Makefile
index c58b657f0097..8d14c7a831be 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -1,4 +1,7 @@
-#
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
# Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
# Licensed under the GPL
#
@@ -88,7 +91,7 @@ CONFIG_KERNEL_HALF_GIGS ?= 0
SIZE = (($(CONFIG_NEST_LEVEL) + $(CONFIG_KERNEL_HALF_GIGS)) * 0x20000000)
-.PHONY: linux
+PHONY += linux
all: linux
diff --git a/arch/v850/kernel/process.c b/arch/v850/kernel/process.c
index 621111ddf907..57218c76925c 100644
--- a/arch/v850/kernel/process.c
+++ b/arch/v850/kernel/process.c
@@ -37,7 +37,7 @@ extern void ret_from_fork (void);
/* The idle loop. */
-void default_idle (void)
+static void default_idle (void)
{
while (! need_resched ())
asm ("halt; nop; nop; nop; nop; nop" ::: "cc");
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index e18eb79bf855..6420baeb8c1f 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -323,7 +323,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
config NR_CPUS
int "Maximum number of CPUs (2-256)"
- range 2 256
+ range 2 255
depends on SMP
default "8"
help
@@ -364,13 +364,15 @@ config GART_IOMMU
select SWIOTLB
depends on PCI
help
- Support the IOMMU. Needed to run systems with more than 3GB of memory
- properly with 32-bit PCI devices that do not support DAC (Double Address
- Cycle). The IOMMU can be turned off at runtime with the iommu=off parameter.
- Normally the kernel will take the right choice by itself.
- This option includes a driver for the AMD Opteron/Athlon64 northbridge IOMMU
- and a software emulation used on other systems.
- If unsure, say Y.
+ Support for hardware IOMMU in AMD's Opteron/Athlon64 Processors
+ and for the bounce buffering software IOMMU.
+ Needed to run systems with more than 3GB of memory properly with
+ 32-bit PCI devices that do not support DAC (Double Address Cycle).
+ The IOMMU can be turned off at runtime with the iommu=off parameter.
+ Normally the kernel will take the right choice by itself.
+ This option includes a driver for the AMD Opteron/Athlon64 IOMMU
+ northbridge and a software emulation used on other systems without
+ hardware IOMMU. If unsure, say Y.
# need this always enabled with GART_IOMMU for the VIA workaround
config SWIOTLB
@@ -429,10 +431,10 @@ config CRASH_DUMP
config PHYSICAL_START
hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
default "0x1000000" if CRASH_DUMP
- default "0x100000"
+ default "0x200000"
help
This gives the physical address where the kernel is loaded. Normally
- for regular kernels this value is 0x100000 (1MB). But in the case
+ for regular kernels this value is 0x200000 (2MB). But in the case
of kexec on panic the fail safe kernel needs to run at a different
address than the panic-ed kernel. This option is used to set the load
address for kernels used to capture crash dump on being kexec'ed
@@ -464,6 +466,14 @@ config SECCOMP
source kernel/Kconfig.hz
+config REORDER
+ bool "Function reordering"
+ default n
+ help
+ This option enables the toolchain to reorder functions for a more
+ optimal TLB usage. If you have pretty much any version of binutils,
+ this can increase your kernel build time by roughly one minute.
+
endmenu
#
@@ -512,16 +522,6 @@ config PCI_MMCONFIG
bool "Support mmconfig PCI config space access"
depends on PCI && ACPI
-config UNORDERED_IO
- bool "Unordered IO mapping access"
- depends on EXPERIMENTAL
- help
- Use unordered stores to access IO memory mappings in device drivers.
- Still very experimental. When a driver works on IA64/ppc64/pa-risc it should
- work with this option, but it makes the drivers behave differently
- from i386. Requires that the driver writer used memory barriers
- properly.
-
source "drivers/pci/pcie/Kconfig"
source "drivers/pci/Kconfig"
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index d7fd46479c55..0fbc0283609c 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -29,12 +29,14 @@ CHECKFLAGS += -D__x86_64__ -m64
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
CFLAGS += $(cflags-y)
CFLAGS += -m64
CFLAGS += -mno-red-zone
CFLAGS += -mcmodel=kernel
CFLAGS += -pipe
+cflags-$(CONFIG_REORDER) += -ffunction-sections
# this makes reading assembly source easier, but produces worse code
# actually it makes the kernel smaller too.
CFLAGS += -fno-reorder-blocks
@@ -67,8 +69,8 @@ drivers-$(CONFIG_OPROFILE) += arch/x86_64/oprofile/
boot := arch/x86_64/boot
-.PHONY: bzImage bzlilo install archmrproper \
- fdimage fdimage144 fdimage288 archclean
+PHONY += bzImage bzlilo install archmrproper \
+ fdimage fdimage144 fdimage288 archclean
#Default target when executing "make"
all: bzImage
diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig
index ce4de61ed85d..566ecc97ee5a 100644
--- a/arch/x86_64/defconfig
+++ b/arch/x86_64/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.16-rc3-git9
-# Sat Feb 18 00:27:03 2006
+# Linux kernel version: 2.6.16-git9
+# Sat Mar 25 15:18:40 2006
#
CONFIG_X86_64=y
CONFIG_64BIT=y
@@ -38,6 +38,7 @@ CONFIG_SYSCTL=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_CPUSETS is not set
+# CONFIG_RELAY is not set
CONFIG_INITRAMFS_SOURCE=""
CONFIG_UID16=y
CONFIG_VM86=y
@@ -79,6 +80,7 @@ CONFIG_STOP_MACHINE=y
# Block layer
#
CONFIG_LBD=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
#
# IO Schedulers
@@ -139,7 +141,6 @@ CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
CONFIG_NR_CPUS=32
CONFIG_HOTPLUG_CPU=y
CONFIG_HPET_TIMER=y
-CONFIG_X86_PM_TIMER=y
CONFIG_HPET_EMULATE_RTC=y
CONFIG_GART_IOMMU=y
CONFIG_SWIOTLB=y
@@ -148,12 +149,13 @@ CONFIG_X86_MCE_INTEL=y
CONFIG_X86_MCE_AMD=y
# CONFIG_KEXEC is not set
# CONFIG_CRASH_DUMP is not set
-CONFIG_PHYSICAL_START=0x100000
+CONFIG_PHYSICAL_START=0x200000
CONFIG_SECCOMP=y
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
+# CONFIG_REORDER is not set
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_ISA_DMA_API=y
@@ -189,12 +191,14 @@ CONFIG_ACPI_NUMA=y
# CONFIG_ACPI_ASUS is not set
# CONFIG_ACPI_IBM is not set
CONFIG_ACPI_TOSHIBA=y
-CONFIG_ACPI_BLACKLIST_YEAR=2001
+CONFIG_ACPI_BLACKLIST_YEAR=0
# CONFIG_ACPI_DEBUG is not set
CONFIG_ACPI_EC=y
CONFIG_ACPI_POWER=y
CONFIG_ACPI_SYSTEM=y
+CONFIG_X86_PM_TIMER=y
CONFIG_ACPI_CONTAINER=y
+CONFIG_ACPI_HOTPLUG_MEMORY=y
#
# CPU Frequency scaling
@@ -232,10 +236,8 @@ CONFIG_X86_ACPI_CPUFREQ_PROC_INTF=y
CONFIG_PCI=y
CONFIG_PCI_DIRECT=y
CONFIG_PCI_MMCONFIG=y
-CONFIG_UNORDERED_IO=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_MSI=y
-# CONFIG_PCI_LEGACY_PROC is not set
# CONFIG_PCI_DEBUG is not set
#
@@ -294,6 +296,7 @@ CONFIG_INET_TCP_DIAG=y
CONFIG_TCP_CONG_BIC=y
CONFIG_IPV6=y
# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
# CONFIG_INET6_AH is not set
# CONFIG_INET6_ESP is not set
# CONFIG_INET6_IPCOMP is not set
@@ -701,6 +704,7 @@ CONFIG_S2IO=m
# Wireless LAN (non-hamradio)
#
# CONFIG_NET_RADIO is not set
+# CONFIG_NET_WIRELESS_RTNETLINK is not set
#
# Wan interfaces
@@ -861,6 +865,8 @@ CONFIG_RTC=y
CONFIG_AGP=y
CONFIG_AGP_AMD64=y
CONFIG_AGP_INTEL=y
+# CONFIG_AGP_SIS is not set
+# CONFIG_AGP_VIA is not set
# CONFIG_DRM is not set
# CONFIG_MWAVE is not set
CONFIG_RAW_DRIVER=y
@@ -907,10 +913,6 @@ CONFIG_HWMON=y
# CONFIG_IBM_ASM is not set
#
-# Multimedia Capabilities Port drivers
-#
-
-#
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
@@ -974,6 +976,7 @@ CONFIG_SOUND_ICH=y
#
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
CONFIG_USB=y
# CONFIG_USB_DEBUG is not set
@@ -1002,7 +1005,6 @@ CONFIG_USB_UHCI_HCD=y
#
# USB Device Class drivers
#
-# CONFIG_OBSOLETE_OSS_USB_DRIVER is not set
# CONFIG_USB_ACM is not set
CONFIG_USB_PRINTER=y
@@ -1121,11 +1123,7 @@ CONFIG_USB_MON=y
# CONFIG_INFINIBAND is not set
#
-# SN Devices
-#
-
-#
-# EDAC - error detection and reporting (RAS)
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
#
# CONFIG_EDAC is not set
@@ -1198,7 +1196,6 @@ CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
CONFIG_RAMFS=y
-CONFIG_RELAYFS_FS=y
# CONFIG_CONFIGFS_FS is not set
#
@@ -1321,6 +1318,7 @@ CONFIG_DETECT_SOFTLOCKUP=y
CONFIG_DEBUG_FS=y
# CONFIG_DEBUG_VM is not set
# CONFIG_FRAME_POINTER is not set
+# CONFIG_UNWIND_INFO is not set
# CONFIG_FORCED_INLINING is not set
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_DEBUG_RODATA is not set
diff --git a/arch/x86_64/ia32/ia32_binfmt.c b/arch/x86_64/ia32/ia32_binfmt.c
index 572b3b28772d..e776139afb20 100644
--- a/arch/x86_64/ia32/ia32_binfmt.c
+++ b/arch/x86_64/ia32/ia32_binfmt.c
@@ -58,7 +58,7 @@ struct elf_phdr;
#define USE_ELF_CORE_DUMP 1
-/* Overwrite elfcore.h */
+/* Override elfcore.h */
#define _LINUX_ELFCORE_H 1
typedef unsigned int elf_greg_t;
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index 2bc55af95419..2b2d029f477c 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -430,24 +430,12 @@ put_tv32(struct compat_timeval __user *o, struct timeval *i)
return err;
}
-extern int do_setitimer(int which, struct itimerval *, struct itimerval *);
+extern unsigned int alarm_setitimer(unsigned int seconds);
asmlinkage long
sys32_alarm(unsigned int seconds)
{
- struct itimerval it_new, it_old;
- unsigned int oldalarm;
-
- it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
- it_new.it_value.tv_sec = seconds;
- it_new.it_value.tv_usec = 0;
- do_setitimer(ITIMER_REAL, &it_new, &it_old);
- oldalarm = it_old.it_value.tv_sec;
- /* ehhh.. We can't return 0 if we have an alarm pending.. */
- /* And we'd better return too much than too little anyway */
- if (it_old.it_value.tv_usec)
- oldalarm++;
- return oldalarm;
+ return alarm_setitimer(seconds);
}
/* Translations due to time_t size differences. Which affects all
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index a0f955b9995f..fffd6b0a2fab 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -60,7 +60,7 @@ static u32 __init allocate_aperture(void)
printk("Cannot allocate aperture memory hole (%p,%uK)\n",
p, aper_size>>10);
if (p)
- free_bootmem_node(nd0, (unsigned long)p, aper_size);
+ free_bootmem_node(nd0, __pa(p), aper_size);
return 0;
}
printk("Mapping aperture over %d KB of RAM @ %lx\n",
@@ -161,7 +161,7 @@ static __u32 __init search_agp_bridge(u32 *order, int *valid_agp)
int num, slot, func;
/* Poor man's PCI discovery */
- for (num = 0; num < 32; num++) {
+ for (num = 0; num < 256; num++) {
for (slot = 0; slot < 32; slot++) {
for (func = 0; func < 8; func++) {
u32 class, cap;
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index e5b14c57eaa0..d54620147e8e 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -342,6 +342,7 @@ void __init init_bsp_APIC(void)
void __cpuinit setup_local_APIC (void)
{
unsigned int value, maxlvt;
+ int i, j;
value = apic_read(APIC_LVR);
@@ -371,6 +372,25 @@ void __cpuinit setup_local_APIC (void)
apic_write(APIC_TASKPRI, value);
/*
+ * After a crash, we no longer service the interrupts and a pending
+ * interrupt from previous kernel might still have ISR bit set.
+ *
+ * Most probably by now CPU has serviced that pending interrupt and
+ * it might not have done the ack_APIC_irq() because it thought,
+ * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
+ * does not clear the ISR bit and cpu thinks it has already serviced
+ * the interrupt. Hence a vector might get locked. It was noticed
+ * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
+ */
+ for (i = APIC_ISR_NR - 1; i >= 0; i--) {
+ value = apic_read(APIC_ISR + i*0x10);
+ for (j = 31; j >= 0; j--) {
+ if (value & (1<<j))
+ ack_APIC_irq();
+ }
+ }
+
+ /*
* Now that we are all set up, enable the APIC
*/
value = apic_read(APIC_SPIV);
diff --git a/arch/x86_64/kernel/early_printk.c b/arch/x86_64/kernel/early_printk.c
index a8a6aa70d695..13af920b6594 100644
--- a/arch/x86_64/kernel/early_printk.c
+++ b/arch/x86_64/kernel/early_printk.c
@@ -60,7 +60,7 @@ static struct console early_vga_console = {
.index = -1,
};
-/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
+/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
static int early_serial_base = 0x3f8; /* ttyS0 */
@@ -80,30 +80,30 @@ static int early_serial_base = 0x3f8; /* ttyS0 */
#define DLL 0 /* Divisor Latch Low */
#define DLH 1 /* Divisor latch High */
-static int early_serial_putc(unsigned char ch)
-{
- unsigned timeout = 0xffff;
- while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
+static int early_serial_putc(unsigned char ch)
+{
+ unsigned timeout = 0xffff;
+ while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
cpu_relax();
outb(ch, early_serial_base + TXR);
return timeout ? 0 : -1;
-}
+}
static void early_serial_write(struct console *con, const char *s, unsigned n)
{
- while (*s && n-- > 0) {
- early_serial_putc(*s);
- if (*s == '\n')
- early_serial_putc('\r');
- s++;
- }
-}
+ while (*s && n-- > 0) {
+ early_serial_putc(*s);
+ if (*s == '\n')
+ early_serial_putc('\r');
+ s++;
+ }
+}
#define DEFAULT_BAUD 9600
static __init void early_serial_init(char *s)
{
- unsigned char c;
+ unsigned char c;
unsigned divisor;
unsigned baud = DEFAULT_BAUD;
char *e;
@@ -112,7 +112,7 @@ static __init void early_serial_init(char *s)
++s;
if (*s) {
- unsigned port;
+ unsigned port;
if (!strncmp(s,"0x",2)) {
early_serial_base = simple_strtoul(s, &e, 16);
} else {
@@ -136,16 +136,16 @@ static __init void early_serial_init(char *s)
outb(0x3, early_serial_base + MCR); /* DTR + RTS */
if (*s) {
- baud = simple_strtoul(s, &e, 0);
- if (baud == 0 || s == e)
+ baud = simple_strtoul(s, &e, 0);
+ if (baud == 0 || s == e)
baud = DEFAULT_BAUD;
- }
-
- divisor = 115200 / baud;
- c = inb(early_serial_base + LCR);
- outb(c | DLAB, early_serial_base + LCR);
- outb(divisor & 0xff, early_serial_base + DLL);
- outb((divisor >> 8) & 0xff, early_serial_base + DLH);
+ }
+
+ divisor = 115200 / baud;
+ c = inb(early_serial_base + LCR);
+ outb(c | DLAB, early_serial_base + LCR);
+ outb(divisor & 0xff, early_serial_base + DLL);
+ outb((divisor >> 8) & 0xff, early_serial_base + DLH);
outb(c & ~DLAB, early_serial_base + LCR);
}
@@ -202,68 +202,68 @@ struct console *early_console = &early_vga_console;
static int early_console_initialized = 0;
void early_printk(const char *fmt, ...)
-{
- char buf[512];
- int n;
+{
+ char buf[512];
+ int n;
va_list ap;
- va_start(ap,fmt);
+ va_start(ap,fmt);
n = vscnprintf(buf,512,fmt,ap);
early_console->write(early_console,buf,n);
- va_end(ap);
-}
+ va_end(ap);
+}
static int __initdata keep_early;
-int __init setup_early_printk(char *opt)
-{
+int __init setup_early_printk(char *opt)
+{
char *space;
- char buf[256];
+ char buf[256];
if (early_console_initialized)
return -1;
- strlcpy(buf,opt,sizeof(buf));
- space = strchr(buf, ' ');
+ strlcpy(buf,opt,sizeof(buf));
+ space = strchr(buf, ' ');
if (space)
- *space = 0;
+ *space = 0;
if (strstr(buf,"keep"))
- keep_early = 1;
+ keep_early = 1;
- if (!strncmp(buf, "serial", 6)) {
+ if (!strncmp(buf, "serial", 6)) {
early_serial_init(buf + 6);
early_console = &early_serial_console;
- } else if (!strncmp(buf, "ttyS", 4)) {
+ } else if (!strncmp(buf, "ttyS", 4)) {
early_serial_init(buf);
- early_console = &early_serial_console;
+ early_console = &early_serial_console;
} else if (!strncmp(buf, "vga", 3)
&& SCREEN_INFO.orig_video_isVGA == 1) {
max_xpos = SCREEN_INFO.orig_video_cols;
max_ypos = SCREEN_INFO.orig_video_lines;
current_ypos = SCREEN_INFO.orig_y;
- early_console = &early_vga_console;
+ early_console = &early_vga_console;
} else if (!strncmp(buf, "simnow", 6)) {
simnow_init(buf + 6);
early_console = &simnow_console;
keep_early = 1;
}
early_console_initialized = 1;
- register_console(early_console);
+ register_console(early_console);
return 0;
}
void __init disable_early_printk(void)
-{
+{
if (!early_console_initialized || !early_console)
return;
if (!keep_early) {
printk("disabling early console\n");
unregister_console(early_console);
early_console_initialized = 0;
- } else {
+ } else {
printk("keeping early console\n");
}
-}
+}
__setup("earlyprintk=", setup_early_printk);
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 7c10e9009d61..8538bfea30e6 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -553,7 +553,7 @@ iret_label:
/* force a signal here? this matches i386 behaviour */
/* running with kernel gs */
bad_iret:
- movq $-9999,%rdi /* better code? */
+ movq $11,%rdi /* SIGSEGV */
sti
jmp do_exit
.previous
diff --git a/arch/x86_64/kernel/functionlist b/arch/x86_64/kernel/functionlist
new file mode 100644
index 000000000000..2bcebdc3eedb
--- /dev/null
+++ b/arch/x86_64/kernel/functionlist
@@ -0,0 +1,1286 @@
+*(.text.flush_thread)
+*(.text.check_poison_obj)
+*(.text.copy_page)
+*(.text.__set_personality)
+*(.text.gart_map_sg)
+*(.text.kmem_cache_free)
+*(.text.find_get_page)
+*(.text._raw_spin_lock)
+*(.text.ide_outb)
+*(.text.unmap_vmas)
+*(.text.copy_page_range)
+*(.text.kprobe_handler)
+*(.text.__handle_mm_fault)
+*(.text.__d_lookup)
+*(.text.copy_user_generic)
+*(.text.__link_path_walk)
+*(.text.get_page_from_freelist)
+*(.text.kmem_cache_alloc)
+*(.text.drive_cmd_intr)
+*(.text.ia32_setup_sigcontext)
+*(.text.huge_pte_offset)
+*(.text.do_page_fault)
+*(.text.page_remove_rmap)
+*(.text.release_pages)
+*(.text.ide_end_request)
+*(.text.__mutex_lock_slowpath)
+*(.text.__find_get_block)
+*(.text.kfree)
+*(.text.vfs_read)
+*(.text._raw_spin_unlock)
+*(.text.free_hot_cold_page)
+*(.text.fget_light)
+*(.text.schedule)
+*(.text.memcmp)
+*(.text.touch_atime)
+*(.text.__might_sleep)
+*(.text.__down_read_trylock)
+*(.text.arch_pick_mmap_layout)
+*(.text.find_vma)
+*(.text.__make_request)
+*(.text.do_generic_mapping_read)
+*(.text.mutex_lock_interruptible)
+*(.text.__generic_file_aio_read)
+*(.text._atomic_dec_and_lock)
+*(.text.__wake_up_bit)
+*(.text.add_to_page_cache)
+*(.text.cache_alloc_debugcheck_after)
+*(.text.vm_normal_page)
+*(.text.mutex_debug_check_no_locks_freed)
+*(.text.net_rx_action)
+*(.text.__find_first_zero_bit)
+*(.text.put_page)
+*(.text._raw_read_lock)
+*(.text.__delay)
+*(.text.dnotify_parent)
+*(.text.do_path_lookup)
+*(.text.do_sync_read)
+*(.text.do_lookup)
+*(.text.bit_waitqueue)
+*(.text.file_read_actor)
+*(.text.strncpy_from_user)
+*(.text.__pagevec_lru_add_active)
+*(.text.fget)
+*(.text.dput)
+*(.text.__strnlen_user)
+*(.text.inotify_inode_queue_event)
+*(.text.rw_verify_area)
+*(.text.ide_intr)
+*(.text.inotify_dentry_parent_queue_event)
+*(.text.permission)
+*(.text.memscan)
+*(.text.hpet_rtc_interrupt)
+*(.text.do_mmap_pgoff)
+*(.text.current_fs_time)
+*(.text.vfs_getattr)
+*(.text.kmem_flagcheck)
+*(.text.mark_page_accessed)
+*(.text.free_pages_and_swap_cache)
+*(.text.generic_fillattr)
+*(.text.__block_prepare_write)
+*(.text.__set_page_dirty_nobuffers)
+*(.text.link_path_walk)
+*(.text.find_get_pages_tag)
+*(.text.ide_do_request)
+*(.text.__alloc_pages)
+*(.text.generic_permission)
+*(.text.mod_page_state_offset)
+*(.text.free_pgd_range)
+*(.text.generic_file_buffered_write)
+*(.text.number)
+*(.text.ide_do_rw_disk)
+*(.text.__brelse)
+*(.text.__mod_page_state_offset)
+*(.text.rotate_reclaimable_page)
+*(.text.find_vma_prepare)
+*(.text.find_vma_prev)
+*(.text.lru_cache_add_active)
+*(.text.__kmalloc_track_caller)
+*(.text.smp_invalidate_interrupt)
+*(.text.handle_IRQ_event)
+*(.text.__find_get_block_slow)
+*(.text.do_wp_page)
+*(.text.do_select)
+*(.text.set_user_nice)
+*(.text.sys_read)
+*(.text.do_munmap)
+*(.text.csum_partial)
+*(.text.__do_softirq)
+*(.text.may_open)
+*(.text.getname)
+*(.text.get_empty_filp)
+*(.text.__fput)
+*(.text.remove_mapping)
+*(.text.filp_ctor)
+*(.text.poison_obj)
+*(.text.unmap_region)
+*(.text.test_set_page_writeback)
+*(.text.__do_page_cache_readahead)
+*(.text.sock_def_readable)
+*(.text.ide_outl)
+*(.text.shrink_zone)
+*(.text.rb_insert_color)
+*(.text.get_request)
+*(.text.sys_pread64)
+*(.text.spin_bug)
+*(.text.ide_outsl)
+*(.text.mask_and_ack_8259A)
+*(.text.filemap_nopage)
+*(.text.page_add_file_rmap)
+*(.text.find_lock_page)
+*(.text.tcp_poll)
+*(.text.__mark_inode_dirty)
+*(.text.file_ra_state_init)
+*(.text.generic_file_llseek)
+*(.text.__pagevec_lru_add)
+*(.text.page_cache_readahead)
+*(.text.n_tty_receive_buf)
+*(.text.zonelist_policy)
+*(.text.vma_adjust)
+*(.text.test_clear_page_dirty)
+*(.text.sync_buffer)
+*(.text.do_exit)
+*(.text.__bitmap_weight)
+*(.text.alloc_pages_current)
+*(.text.get_unused_fd)
+*(.text.zone_watermark_ok)
+*(.text.cpuset_update_task_memory_state)
+*(.text.__bitmap_empty)
+*(.text.sys_munmap)
+*(.text.__inode_dir_notify)
+*(.text.__generic_file_aio_write_nolock)
+*(.text.__pte_alloc)
+*(.text.sys_select)
+*(.text.vm_acct_memory)
+*(.text.vfs_write)
+*(.text.__lru_add_drain)
+*(.text.prio_tree_insert)
+*(.text.generic_file_aio_read)
+*(.text.vma_merge)
+*(.text.block_write_full_page)
+*(.text.__page_set_anon_rmap)
+*(.text.apic_timer_interrupt)
+*(.text.release_console_sem)
+*(.text.sys_write)
+*(.text.sys_brk)
+*(.text.dup_mm)
+*(.text.read_current_timer)
+*(.text.ll_rw_block)
+*(.text.blk_rq_map_sg)
+*(.text.dbg_userword)
+*(.text.__block_commit_write)
+*(.text.cache_grow)
+*(.text.copy_strings)
+*(.text.release_task)
+*(.text.do_sync_write)
+*(.text.unlock_page)
+*(.text.load_elf_binary)
+*(.text.__follow_mount)
+*(.text.__getblk)
+*(.text.do_sys_open)
+*(.text.current_kernel_time)
+*(.text.call_rcu)
+*(.text.write_chan)
+*(.text.vsnprintf)
+*(.text.dummy_inode_setsecurity)
+*(.text.submit_bh)
+*(.text.poll_freewait)
+*(.text.bio_alloc_bioset)
+*(.text.skb_clone)
+*(.text.page_waitqueue)
+*(.text.__mutex_lock_interruptible_slowpath)
+*(.text.get_index)
+*(.text.csum_partial_copy_generic)
+*(.text.bad_range)
+*(.text.remove_vma)
+*(.text.cp_new_stat)
+*(.text.alloc_arraycache)
+*(.text.test_clear_page_writeback)
+*(.text.strsep)
+*(.text.open_namei)
+*(.text._raw_read_unlock)
+*(.text.get_vma_policy)
+*(.text.__down_write_trylock)
+*(.text.find_get_pages)
+*(.text.tcp_rcv_established)
+*(.text.generic_make_request)
+*(.text.__block_write_full_page)
+*(.text.cfq_set_request)
+*(.text.sys_inotify_init)
+*(.text.split_vma)
+*(.text.__mod_timer)
+*(.text.get_options)
+*(.text.vma_link)
+*(.text.mpage_writepages)
+*(.text.truncate_complete_page)
+*(.text.tcp_recvmsg)
+*(.text.sigprocmask)
+*(.text.filemap_populate)
+*(.text.sys_close)
+*(.text.inotify_dev_queue_event)
+*(.text.do_task_stat)
+*(.text.__dentry_open)
+*(.text.unlink_file_vma)
+*(.text.__pollwait)
+*(.text.packet_rcv_spkt)
+*(.text.drop_buffers)
+*(.text.free_pgtables)
+*(.text.generic_file_direct_write)
+*(.text.copy_process)
+*(.text.netif_receive_skb)
+*(.text.dnotify_flush)
+*(.text.print_bad_pte)
+*(.text.anon_vma_unlink)
+*(.text.sys_mprotect)
+*(.text.sync_sb_inodes)
+*(.text.find_inode_fast)
+*(.text.dummy_inode_readlink)
+*(.text.putname)
+*(.text.init_smp_flush)
+*(.text.dbg_redzone2)
+*(.text.sk_run_filter)
+*(.text.may_expand_vm)
+*(.text.generic_file_aio_write)
+*(.text.find_next_zero_bit)
+*(.text.file_kill)
+*(.text.audit_getname)
+*(.text.arch_unmap_area_topdown)
+*(.text.alloc_page_vma)
+*(.text.tcp_transmit_skb)
+*(.text.rb_next)
+*(.text.dbg_redzone1)
+*(.text.generic_file_mmap)
+*(.text.vfs_fstat)
+*(.text.sys_time)
+*(.text.page_lock_anon_vma)
+*(.text.get_unmapped_area)
+*(.text.remote_llseek)
+*(.text.__up_read)
+*(.text.fd_install)
+*(.text.eventpoll_init_file)
+*(.text.dma_alloc_coherent)
+*(.text.create_empty_buffers)
+*(.text.__mutex_unlock_slowpath)
+*(.text.dup_fd)
+*(.text.d_alloc)
+*(.text.tty_ldisc_try)
+*(.text.sys_stime)
+*(.text.__rb_rotate_right)
+*(.text.d_validate)
+*(.text.rb_erase)
+*(.text.path_release)
+*(.text.memmove)
+*(.text.invalidate_complete_page)
+*(.text.clear_inode)
+*(.text.cache_estimate)
+*(.text.alloc_buffer_head)
+*(.text.smp_call_function_interrupt)
+*(.text.flush_tlb_others)
+*(.text.file_move)
+*(.text.balance_dirty_pages_ratelimited)
+*(.text.vma_prio_tree_add)
+*(.text.timespec_trunc)
+*(.text.mempool_alloc)
+*(.text.iget_locked)
+*(.text.d_alloc_root)
+*(.text.cpuset_populate_dir)
+*(.text.anon_vma_prepare)
+*(.text.sys_newstat)
+*(.text.alloc_page_interleave)
+*(.text.__path_lookup_intent_open)
+*(.text.__pagevec_free)
+*(.text.inode_init_once)
+*(.text.free_vfsmnt)
+*(.text.__user_walk_fd)
+*(.text.cfq_idle_slice_timer)
+*(.text.sys_mmap)
+*(.text.sys_llseek)
+*(.text.prio_tree_remove)
+*(.text.filp_close)
+*(.text.file_permission)
+*(.text.vma_prio_tree_remove)
+*(.text.tcp_ack)
+*(.text.nameidata_to_filp)
+*(.text.sys_lseek)
+*(.text.percpu_counter_mod)
+*(.text.igrab)
+*(.text.__bread)
+*(.text.alloc_inode)
+*(.text.filldir)
+*(.text.__rb_rotate_left)
+*(.text.irq_affinity_write_proc)
+*(.text.init_request_from_bio)
+*(.text.find_or_create_page)
+*(.text.tty_poll)
+*(.text.tcp_sendmsg)
+*(.text.ide_wait_stat)
+*(.text.free_buffer_head)
+*(.text.flush_signal_handlers)
+*(.text.tcp_v4_rcv)
+*(.text.nr_blockdev_pages)
+*(.text.locks_remove_flock)
+*(.text.__iowrite32_copy)
+*(.text.do_filp_open)
+*(.text.try_to_release_page)
+*(.text.page_add_new_anon_rmap)
+*(.text.kmem_cache_size)
+*(.text.eth_type_trans)
+*(.text.try_to_free_buffers)
+*(.text.schedule_tail)
+*(.text.proc_lookup)
+*(.text.no_llseek)
+*(.text.kfree_skbmem)
+*(.text.do_wait)
+*(.text.do_mpage_readpage)
+*(.text.vfs_stat_fd)
+*(.text.tty_write)
+*(.text.705)
+*(.text.sync_page)
+*(.text.__remove_shared_vm_struct)
+*(.text.__kfree_skb)
+*(.text.sock_poll)
+*(.text.get_request_wait)
+*(.text.do_sigaction)
+*(.text.do_brk)
+*(.text.tcp_event_data_recv)
+*(.text.read_chan)
+*(.text.pipe_writev)
+*(.text.__emul_lookup_dentry)
+*(.text.rtc_get_rtc_time)
+*(.text.print_objinfo)
+*(.text.file_update_time)
+*(.text.do_signal)
+*(.text.disable_8259A_irq)
+*(.text.blk_queue_bounce)
+*(.text.__anon_vma_link)
+*(.text.__vma_link)
+*(.text.vfs_rename)
+*(.text.sys_newlstat)
+*(.text.sys_newfstat)
+*(.text.sys_mknod)
+*(.text.__show_regs)
+*(.text.iput)
+*(.text.get_signal_to_deliver)
+*(.text.flush_tlb_page)
+*(.text.debug_mutex_wake_waiter)
+*(.text.copy_thread)
+*(.text.clear_page_dirty_for_io)
+*(.text.buffer_io_error)
+*(.text.vfs_permission)
+*(.text.truncate_inode_pages_range)
+*(.text.sys_recvfrom)
+*(.text.remove_suid)
+*(.text.mark_buffer_dirty)
+*(.text.local_bh_enable)
+*(.text.get_zeroed_page)
+*(.text.get_vmalloc_info)
+*(.text.flush_old_exec)
+*(.text.dummy_inode_permission)
+*(.text.__bio_add_page)
+*(.text.prio_tree_replace)
+*(.text.notify_change)
+*(.text.mntput_no_expire)
+*(.text.fput)
+*(.text.__end_that_request_first)
+*(.text.wake_up_bit)
+*(.text.unuse_mm)
+*(.text.skb_release_data)
+*(.text.shrink_icache_memory)
+*(.text.sched_balance_self)
+*(.text.__pmd_alloc)
+*(.text.pipe_poll)
+*(.text.normal_poll)
+*(.text.__free_pages)
+*(.text.follow_mount)
+*(.text.cdrom_start_packet_command)
+*(.text.blk_recount_segments)
+*(.text.bio_put)
+*(.text.__alloc_skb)
+*(.text.__wake_up)
+*(.text.vm_stat_account)
+*(.text.sys_fcntl)
+*(.text.sys_fadvise64)
+*(.text._raw_write_unlock)
+*(.text.__pud_alloc)
+*(.text.alloc_page_buffers)
+*(.text.vfs_llseek)
+*(.text.sockfd_lookup)
+*(.text._raw_write_lock)
+*(.text.put_compound_page)
+*(.text.prune_dcache)
+*(.text.pipe_readv)
+*(.text.mempool_free)
+*(.text.make_ahead_window)
+*(.text.lru_add_drain)
+*(.text.constant_test_bit)
+*(.text.__clear_user)
+*(.text.arch_unmap_area)
+*(.text.anon_vma_link)
+*(.text.sys_chroot)
+*(.text.setup_arg_pages)
+*(.text.radix_tree_preload)
+*(.text.init_rwsem)
+*(.text.generic_osync_inode)
+*(.text.generic_delete_inode)
+*(.text.do_sys_poll)
+*(.text.dev_queue_xmit)
+*(.text.default_llseek)
+*(.text.__writeback_single_inode)
+*(.text.vfs_ioctl)
+*(.text.__up_write)
+*(.text.unix_poll)
+*(.text.sys_rt_sigprocmask)
+*(.text.sock_recvmsg)
+*(.text.recalc_bh_state)
+*(.text.__put_unused_fd)
+*(.text.process_backlog)
+*(.text.locks_remove_posix)
+*(.text.lease_modify)
+*(.text.expand_files)
+*(.text.end_buffer_read_nobh)
+*(.text.d_splice_alias)
+*(.text.debug_mutex_init_waiter)
+*(.text.copy_from_user)
+*(.text.cap_vm_enough_memory)
+*(.text.show_vfsmnt)
+*(.text.release_sock)
+*(.text.pfifo_fast_enqueue)
+*(.text.half_md4_transform)
+*(.text.fs_may_remount_ro)
+*(.text.do_fork)
+*(.text.copy_hugetlb_page_range)
+*(.text.cache_free_debugcheck)
+*(.text.__tcp_select_window)
+*(.text.task_handoff_register)
+*(.text.sys_open)
+*(.text.strlcpy)
+*(.text.skb_copy_datagram_iovec)
+*(.text.set_up_list3s)
+*(.text.release_open_intent)
+*(.text.qdisc_restart)
+*(.text.n_tty_chars_in_buffer)
+*(.text.inode_change_ok)
+*(.text.__downgrade_write)
+*(.text.debug_mutex_unlock)
+*(.text.add_timer_randomness)
+*(.text.sock_common_recvmsg)
+*(.text.set_bh_page)
+*(.text.printk_lock)
+*(.text.path_release_on_umount)
+*(.text.ip_output)
+*(.text.ide_build_dmatable)
+*(.text.__get_user_8)
+*(.text.end_buffer_read_sync)
+*(.text.__d_path)
+*(.text.d_move)
+*(.text.del_timer)
+*(.text.constant_test_bit)
+*(.text.blockable_page_cache_readahead)
+*(.text.tty_read)
+*(.text.sys_readlink)
+*(.text.sys_faccessat)
+*(.text.read_swap_cache_async)
+*(.text.pty_write_room)
+*(.text.page_address_in_vma)
+*(.text.kthread)
+*(.text.cfq_exit_io_context)
+*(.text.__tcp_push_pending_frames)
+*(.text.sys_pipe)
+*(.text.submit_bio)
+*(.text.pid_revalidate)
+*(.text.page_referenced_file)
+*(.text.lock_sock)
+*(.text.get_page_state_node)
+*(.text.generic_block_bmap)
+*(.text.do_setitimer)
+*(.text.dev_queue_xmit_nit)
+*(.text.copy_from_read_buf)
+*(.text.__const_udelay)
+*(.text.console_conditional_schedule)
+*(.text.wake_up_new_task)
+*(.text.wait_for_completion_interruptible)
+*(.text.tcp_rcv_rtt_update)
+*(.text.sys_mlockall)
+*(.text.set_fs_altroot)
+*(.text.schedule_timeout)
+*(.text.nr_free_pagecache_pages)
+*(.text.nf_iterate)
+*(.text.mapping_tagged)
+*(.text.ip_queue_xmit)
+*(.text.ip_local_deliver)
+*(.text.follow_page)
+*(.text.elf_map)
+*(.text.dummy_file_permission)
+*(.text.dispose_list)
+*(.text.dentry_open)
+*(.text.dentry_iput)
+*(.text.bio_alloc)
+*(.text.alloc_skb_from_cache)
+*(.text.wait_on_page_bit)
+*(.text.vfs_readdir)
+*(.text.vfs_lstat)
+*(.text.seq_escape)
+*(.text.__posix_lock_file)
+*(.text.mm_release)
+*(.text.kref_put)
+*(.text.ip_rcv)
+*(.text.__iget)
+*(.text.free_pages)
+*(.text.find_mergeable_anon_vma)
+*(.text.find_extend_vma)
+*(.text.dummy_inode_listsecurity)
+*(.text.bio_add_page)
+*(.text.__vm_enough_memory)
+*(.text.vfs_stat)
+*(.text.tty_paranoia_check)
+*(.text.tcp_read_sock)
+*(.text.tcp_data_queue)
+*(.text.sys_uname)
+*(.text.sys_renameat)
+*(.text.__strncpy_from_user)
+*(.text.__mutex_init)
+*(.text.__lookup_hash)
+*(.text.kref_get)
+*(.text.ip_route_input)
+*(.text.__insert_inode_hash)
+*(.text.do_sock_write)
+*(.text.blk_done_softirq)
+*(.text.__wake_up_sync)
+*(.text.__vma_link_rb)
+*(.text.tty_ioctl)
+*(.text.tracesys)
+*(.text.sys_getdents)
+*(.text.sys_dup)
+*(.text.stub_execve)
+*(.text.sha_transform)
+*(.text.radix_tree_tag_clear)
+*(.text.put_unused_fd)
+*(.text.put_files_struct)
+*(.text.mpage_readpages)
+*(.text.may_delete)
+*(.text.kmem_cache_create)
+*(.text.ip_mc_output)
+*(.text.interleave_nodes)
+*(.text.groups_search)
+*(.text.generic_drop_inode)
+*(.text.generic_commit_write)
+*(.text.fcntl_setlk)
+*(.text.exit_mmap)
+*(.text.end_page_writeback)
+*(.text.__d_rehash)
+*(.text.debug_mutex_free_waiter)
+*(.text.csum_ipv6_magic)
+*(.text.count)
+*(.text.cleanup_rbuf)
+*(.text.check_spinlock_acquired_node)
+*(.text.can_vma_merge_after)
+*(.text.bio_endio)
+*(.text.alloc_pidmap)
+*(.text.write_ldt)
+*(.text.vmtruncate_range)
+*(.text.vfs_create)
+*(.text.__user_walk)
+*(.text.update_send_head)
+*(.text.unmap_underlying_metadata)
+*(.text.tty_ldisc_deref)
+*(.text.tcp_setsockopt)
+*(.text.tcp_send_ack)
+*(.text.sys_pause)
+*(.text.sys_gettimeofday)
+*(.text.sync_dirty_buffer)
+*(.text.strncmp)
+*(.text.release_posix_timer)
+*(.text.proc_file_read)
+*(.text.prepare_to_wait)
+*(.text.locks_mandatory_locked)
+*(.text.interruptible_sleep_on_timeout)
+*(.text.inode_sub_bytes)
+*(.text.in_group_p)
+*(.text.hrtimer_try_to_cancel)
+*(.text.filldir64)
+*(.text.fasync_helper)
+*(.text.dummy_sb_pivotroot)
+*(.text.d_lookup)
+*(.text.d_instantiate)
+*(.text.__d_find_alias)
+*(.text.cpu_idle_wait)
+*(.text.cond_resched_lock)
+*(.text.chown_common)
+*(.text.blk_congestion_wait)
+*(.text.activate_page)
+*(.text.unlock_buffer)
+*(.text.tty_wakeup)
+*(.text.tcp_v4_do_rcv)
+*(.text.tcp_current_mss)
+*(.text.sys_openat)
+*(.text.sys_fchdir)
+*(.text.strnlen_user)
+*(.text.strnlen)
+*(.text.strchr)
+*(.text.sock_common_getsockopt)
+*(.text.skb_checksum)
+*(.text.remove_wait_queue)
+*(.text.rb_replace_node)
+*(.text.radix_tree_node_ctor)
+*(.text.pty_chars_in_buffer)
+*(.text.profile_hit)
+*(.text.prio_tree_left)
+*(.text.pgd_clear_bad)
+*(.text.pfifo_fast_dequeue)
+*(.text.page_referenced)
+*(.text.open_exec)
+*(.text.mmput)
+*(.text.mm_init)
+*(.text.__ide_dma_off_quietly)
+*(.text.ide_dma_intr)
+*(.text.hrtimer_start)
+*(.text.get_io_context)
+*(.text.__get_free_pages)
+*(.text.find_first_zero_bit)
+*(.text.file_free_rcu)
+*(.text.dummy_socket_sendmsg)
+*(.text.do_unlinkat)
+*(.text.do_arch_prctl)
+*(.text.destroy_inode)
+*(.text.can_vma_merge_before)
+*(.text.block_sync_page)
+*(.text.block_prepare_write)
+*(.text.bio_init)
+*(.text.arch_ptrace)
+*(.text.wake_up_inode)
+*(.text.wait_on_retry_sync_kiocb)
+*(.text.vma_prio_tree_next)
+*(.text.tcp_rcv_space_adjust)
+*(.text.__tcp_ack_snd_check)
+*(.text.sys_utime)
+*(.text.sys_recvmsg)
+*(.text.sys_mremap)
+*(.text.sys_bdflush)
+*(.text.sleep_on)
+*(.text.set_page_dirty_lock)
+*(.text.seq_path)
+*(.text.schedule_timeout_interruptible)
+*(.text.sched_fork)
+*(.text.rt_run_flush)
+*(.text.profile_munmap)
+*(.text.prepare_binprm)
+*(.text.__pagevec_release_nonlru)
+*(.text.m_show)
+*(.text.lookup_mnt)
+*(.text.__lookup_mnt)
+*(.text.lock_timer_base)
+*(.text.is_subdir)
+*(.text.invalidate_bh_lru)
+*(.text.init_buffer_head)
+*(.text.ifind_fast)
+*(.text.ide_dma_start)
+*(.text.__get_page_state)
+*(.text.flock_to_posix_lock)
+*(.text.__find_symbol)
+*(.text.do_futex)
+*(.text.do_execve)
+*(.text.dirty_writeback_centisecs_handler)
+*(.text.dev_watchdog)
+*(.text.can_share_swap_page)
+*(.text.blkdev_put)
+*(.text.bio_get_nr_vecs)
+*(.text.xfrm_compile_policy)
+*(.text.vma_prio_tree_insert)
+*(.text.vfs_lstat_fd)
+*(.text.__user_path_lookup_open)
+*(.text.thread_return)
+*(.text.tcp_send_delayed_ack)
+*(.text.sock_def_error_report)
+*(.text.shrink_slab)
+*(.text.serial_out)
+*(.text.seq_read)
+*(.text.secure_ip_id)
+*(.text.search_binary_handler)
+*(.text.proc_pid_unhash)
+*(.text.pagevec_lookup)
+*(.text.new_inode)
+*(.text.memcpy_toiovec)
+*(.text.locks_free_lock)
+*(.text.__lock_page)
+*(.text.__lock_buffer)
+*(.text.load_module)
+*(.text.is_bad_inode)
+*(.text.invalidate_inode_buffers)
+*(.text.insert_vm_struct)
+*(.text.inode_setattr)
+*(.text.inode_add_bytes)
+*(.text.ide_read_24)
+*(.text.ide_get_error_location)
+*(.text.ide_do_drive_cmd)
+*(.text.get_locked_pte)
+*(.text.get_filesystem_list)
+*(.text.generic_file_open)
+*(.text.follow_down)
+*(.text.find_next_bit)
+*(.text.__find_first_bit)
+*(.text.exit_mm)
+*(.text.exec_keys)
+*(.text.end_buffer_write_sync)
+*(.text.end_bio_bh_io_sync)
+*(.text.dummy_socket_shutdown)
+*(.text.d_rehash)
+*(.text.d_path)
+*(.text.do_ioctl)
+*(.text.dget_locked)
+*(.text.copy_thread_group_keys)
+*(.text.cdrom_end_request)
+*(.text.cap_bprm_apply_creds)
+*(.text.blk_rq_bio_prep)
+*(.text.__bitmap_intersects)
+*(.text.bio_phys_segments)
+*(.text.bio_free)
+*(.text.arch_get_unmapped_area_topdown)
+*(.text.writeback_in_progress)
+*(.text.vfs_follow_link)
+*(.text.tcp_rcv_state_process)
+*(.text.tcp_check_space)
+*(.text.sys_stat)
+*(.text.sys_rt_sigreturn)
+*(.text.sys_rt_sigaction)
+*(.text.sys_remap_file_pages)
+*(.text.sys_pwrite64)
+*(.text.sys_fchownat)
+*(.text.sys_fchmodat)
+*(.text.strncat)
+*(.text.strlcat)
+*(.text.strcmp)
+*(.text.steal_locks)
+*(.text.sock_create)
+*(.text.sk_stream_rfree)
+*(.text.sk_stream_mem_schedule)
+*(.text.skip_atoi)
+*(.text.sk_alloc)
+*(.text.show_stat)
+*(.text.set_fs_pwd)
+*(.text.set_binfmt)
+*(.text.pty_unthrottle)
+*(.text.proc_symlink)
+*(.text.pipe_release)
+*(.text.pageout)
+*(.text.n_tty_write_wakeup)
+*(.text.n_tty_ioctl)
+*(.text.nr_free_zone_pages)
+*(.text.migration_thread)
+*(.text.mempool_free_slab)
+*(.text.meminfo_read_proc)
+*(.text.max_sane_readahead)
+*(.text.lru_cache_add)
+*(.text.kill_fasync)
+*(.text.kernel_read)
+*(.text.invalidate_mapping_pages)
+*(.text.inode_has_buffers)
+*(.text.init_once)
+*(.text.inet_sendmsg)
+*(.text.idedisk_issue_flush)
+*(.text.generic_file_write)
+*(.text.free_more_memory)
+*(.text.__free_fdtable)
+*(.text.filp_dtor)
+*(.text.exit_sem)
+*(.text.exit_itimers)
+*(.text.error_interrupt)
+*(.text.end_buffer_async_write)
+*(.text.eligible_child)
+*(.text.elf_map)
+*(.text.dump_task_regs)
+*(.text.dummy_task_setscheduler)
+*(.text.dummy_socket_accept)
+*(.text.dummy_file_free_security)
+*(.text.__down_read)
+*(.text.do_sock_read)
+*(.text.do_sigaltstack)
+*(.text.do_mremap)
+*(.text.current_io_context)
+*(.text.cpu_swap_callback)
+*(.text.copy_vma)
+*(.text.cap_bprm_set_security)
+*(.text.blk_insert_request)
+*(.text.bio_map_kern_endio)
+*(.text.bio_hw_segments)
+*(.text.bictcp_cong_avoid)
+*(.text.add_interrupt_randomness)
+*(.text.wait_for_completion)
+*(.text.version_read_proc)
+*(.text.unix_write_space)
+*(.text.tty_ldisc_ref_wait)
+*(.text.tty_ldisc_put)
+*(.text.try_to_wake_up)
+*(.text.tcp_v4_tw_remember_stamp)
+*(.text.tcp_try_undo_dsack)
+*(.text.tcp_may_send_now)
+*(.text.sys_waitid)
+*(.text.sys_sched_getparam)
+*(.text.sys_getppid)
+*(.text.sys_getcwd)
+*(.text.sys_dup2)
+*(.text.sys_chmod)
+*(.text.sys_chdir)
+*(.text.sprintf)
+*(.text.sock_wfree)
+*(.text.sock_aio_write)
+*(.text.skb_drop_fraglist)
+*(.text.skb_dequeue)
+*(.text.set_close_on_exec)
+*(.text.set_brk)
+*(.text.seq_puts)
+*(.text.SELECT_DRIVE)
+*(.text.sched_exec)
+*(.text.return_EIO)
+*(.text.remove_from_page_cache)
+*(.text.rcu_start_batch)
+*(.text.__put_task_struct)
+*(.text.proc_pid_readdir)
+*(.text.proc_get_inode)
+*(.text.prepare_to_wait_exclusive)
+*(.text.pipe_wait)
+*(.text.pipe_new)
+*(.text.pdflush_operation)
+*(.text.__pagevec_release)
+*(.text.pagevec_lookup_tag)
+*(.text.packet_rcv)
+*(.text.n_tty_set_room)
+*(.text.nr_free_pages)
+*(.text.__net_timestamp)
+*(.text.mpage_end_io_read)
+*(.text.mod_timer)
+*(.text.__memcpy)
+*(.text.mb_cache_shrink_fn)
+*(.text.lock_rename)
+*(.text.kstrdup)
+*(.text.is_ignored)
+*(.text.int_very_careful)
+*(.text.inotify_inode_is_dead)
+*(.text.inotify_get_cookie)
+*(.text.inode_get_bytes)
+*(.text.init_timer)
+*(.text.init_dev)
+*(.text.inet_getname)
+*(.text.ide_map_sg)
+*(.text.__ide_dma_end)
+*(.text.hrtimer_get_remaining)
+*(.text.get_task_mm)
+*(.text.get_random_int)
+*(.text.free_pipe_info)
+*(.text.filemap_write_and_wait_range)
+*(.text.exit_thread)
+*(.text.enter_idle)
+*(.text.end_that_request_first)
+*(.text.end_8259A_irq)
+*(.text.dummy_file_alloc_security)
+*(.text.do_group_exit)
+*(.text.debug_mutex_init)
+*(.text.cpuset_exit)
+*(.text.cpu_idle)
+*(.text.copy_semundo)
+*(.text.copy_files)
+*(.text.chrdev_open)
+*(.text.cdrom_transfer_packet_command)
+*(.text.cdrom_mode_sense)
+*(.text.blk_phys_contig_segment)
+*(.text.blk_get_queue)
+*(.text.bio_split)
+*(.text.audit_alloc)
+*(.text.anon_pipe_buf_release)
+*(.text.add_wait_queue_exclusive)
+*(.text.add_wait_queue)
+*(.text.acct_process)
+*(.text.account)
+*(.text.zeromap_page_range)
+*(.text.yield)
+*(.text.writeback_acquire)
+*(.text.worker_thread)
+*(.text.wait_on_page_writeback_range)
+*(.text.__wait_on_buffer)
+*(.text.vscnprintf)
+*(.text.vmalloc_to_pfn)
+*(.text.vgacon_save_screen)
+*(.text.vfs_unlink)
+*(.text.vfs_rmdir)
+*(.text.unregister_md_personality)
+*(.text.unlock_new_inode)
+*(.text.unix_stream_sendmsg)
+*(.text.unix_stream_recvmsg)
+*(.text.unhash_process)
+*(.text.udp_v4_lookup_longway)
+*(.text.tty_ldisc_flush)
+*(.text.tty_ldisc_enable)
+*(.text.tty_hung_up_p)
+*(.text.tty_buffer_free_all)
+*(.text.tso_fragment)
+*(.text.try_to_del_timer_sync)
+*(.text.tcp_v4_err)
+*(.text.tcp_unhash)
+*(.text.tcp_seq_next)
+*(.text.tcp_select_initial_window)
+*(.text.tcp_sacktag_write_queue)
+*(.text.tcp_cwnd_validate)
+*(.text.sys_vhangup)
+*(.text.sys_uselib)
+*(.text.sys_symlink)
+*(.text.sys_signal)
+*(.text.sys_poll)
+*(.text.sys_mount)
+*(.text.sys_kill)
+*(.text.sys_ioctl)
+*(.text.sys_inotify_add_watch)
+*(.text.sys_getuid)
+*(.text.sys_getrlimit)
+*(.text.sys_getitimer)
+*(.text.sys_getgroups)
+*(.text.sys_ftruncate)
+*(.text.sysfs_lookup)
+*(.text.sys_exit_group)
+*(.text.stub_fork)
+*(.text.sscanf)
+*(.text.sock_map_fd)
+*(.text.sock_get_timestamp)
+*(.text.__sock_create)
+*(.text.smp_call_function_single)
+*(.text.sk_stop_timer)
+*(.text.skb_copy_and_csum_datagram)
+*(.text.__skb_checksum_complete)
+*(.text.single_next)
+*(.text.sigqueue_alloc)
+*(.text.shrink_dcache_parent)
+*(.text.select_idle_routine)
+*(.text.run_workqueue)
+*(.text.run_local_timers)
+*(.text.remove_inode_hash)
+*(.text.remove_dquot_ref)
+*(.text.register_binfmt)
+*(.text.read_cache_pages)
+*(.text.rb_last)
+*(.text.pty_open)
+*(.text.proc_root_readdir)
+*(.text.proc_pid_flush)
+*(.text.proc_pident_lookup)
+*(.text.proc_fill_super)
+*(.text.proc_exe_link)
+*(.text.posix_locks_deadlock)
+*(.text.pipe_iov_copy_from_user)
+*(.text.opost)
+*(.text.nf_register_hook)
+*(.text.netif_rx_ni)
+*(.text.m_start)
+*(.text.mpage_writepage)
+*(.text.mm_alloc)
+*(.text.memory_open)
+*(.text.mark_buffer_async_write)
+*(.text.lru_add_drain_all)
+*(.text.locks_init_lock)
+*(.text.locks_delete_lock)
+*(.text.lock_hrtimer_base)
+*(.text.load_script)
+*(.text.__kill_fasync)
+*(.text.ip_mc_sf_allow)
+*(.text.__ioremap)
+*(.text.int_with_check)
+*(.text.int_sqrt)
+*(.text.install_thread_keyring)
+*(.text.init_page_buffers)
+*(.text.inet_sock_destruct)
+*(.text.idle_notifier_register)
+*(.text.ide_execute_command)
+*(.text.ide_end_drive_cmd)
+*(.text.__ide_dma_host_on)
+*(.text.hrtimer_run_queues)
+*(.text.hpet_mask_rtc_irq_bit)
+*(.text.__get_zone_counts)
+*(.text.get_zone_counts)
+*(.text.get_write_access)
+*(.text.get_fs_struct)
+*(.text.get_dirty_limits)
+*(.text.generic_readlink)
+*(.text.free_hot_page)
+*(.text.finish_wait)
+*(.text.find_inode)
+*(.text.find_first_bit)
+*(.text.__filemap_fdatawrite_range)
+*(.text.__filemap_copy_from_user_iovec)
+*(.text.exit_aio)
+*(.text.elv_set_request)
+*(.text.elv_former_request)
+*(.text.dup_namespace)
+*(.text.dupfd)
+*(.text.dummy_socket_getsockopt)
+*(.text.dummy_sb_post_mountroot)
+*(.text.dummy_quotactl)
+*(.text.dummy_inode_rename)
+*(.text.__do_SAK)
+*(.text.do_pipe)
+*(.text.do_fsync)
+*(.text.d_instantiate_unique)
+*(.text.d_find_alias)
+*(.text.deny_write_access)
+*(.text.dentry_unhash)
+*(.text.d_delete)
+*(.text.datagram_poll)
+*(.text.cpuset_fork)
+*(.text.cpuid_read)
+*(.text.copy_namespace)
+*(.text.cond_resched)
+*(.text.check_version)
+*(.text.__change_page_attr)
+*(.text.cfq_slab_kill)
+*(.text.cfq_completed_request)
+*(.text.cdrom_pc_intr)
+*(.text.cdrom_decode_status)
+*(.text.cap_capset_check)
+*(.text.blk_put_request)
+*(.text.bio_fs_destructor)
+*(.text.bictcp_min_cwnd)
+*(.text.alloc_chrdev_region)
+*(.text.add_element)
+*(.text.acct_update_integrals)
+*(.text.write_boundary_block)
+*(.text.writeback_release)
+*(.text.writeback_inodes)
+*(.text.wake_up_state)
+*(.text.__wake_up_locked)
+*(.text.wake_futex)
+*(.text.wait_task_inactive)
+*(.text.__wait_on_freeing_inode)
+*(.text.wait_noreap_copyout)
+*(.text.vmstat_start)
+*(.text.vgacon_do_font_op)
+*(.text.vfs_readv)
+*(.text.vfs_quota_sync)
+*(.text.update_queue)
+*(.text.unshare_files)
+*(.text.unmap_vm_area)
+*(.text.unix_socketpair)
+*(.text.unix_release_sock)
+*(.text.unix_detach_fds)
+*(.text.unix_create1)
+*(.text.unix_bind)
+*(.text.udp_sendmsg)
+*(.text.udp_rcv)
+*(.text.udp_queue_rcv_skb)
+*(.text.uart_write)
+*(.text.uart_startup)
+*(.text.uart_open)
+*(.text.tty_vhangup)
+*(.text.tty_termios_baud_rate)
+*(.text.tty_release)
+*(.text.tty_ldisc_ref)
+*(.text.throttle_vm_writeout)
+*(.text.058)
+*(.text.tcp_xmit_probe_skb)
+*(.text.tcp_v4_send_check)
+*(.text.tcp_v4_destroy_sock)
+*(.text.tcp_sync_mss)
+*(.text.tcp_snd_test)
+*(.text.tcp_slow_start)
+*(.text.tcp_send_fin)
+*(.text.tcp_rtt_estimator)
+*(.text.tcp_parse_options)
+*(.text.tcp_ioctl)
+*(.text.tcp_init_tso_segs)
+*(.text.tcp_init_cwnd)
+*(.text.tcp_getsockopt)
+*(.text.tcp_fin)
+*(.text.tcp_connect)
+*(.text.tcp_cong_avoid)
+*(.text.__tcp_checksum_complete_user)
+*(.text.task_dumpable)
+*(.text.sys_wait4)
+*(.text.sys_utimes)
+*(.text.sys_symlinkat)
+*(.text.sys_socketpair)
+*(.text.sys_rmdir)
+*(.text.sys_readahead)
+*(.text.sys_nanosleep)
+*(.text.sys_linkat)
+*(.text.sys_fstat)
+*(.text.sysfs_readdir)
+*(.text.sys_execve)
+*(.text.sysenter_tracesys)
+*(.text.sys_chown)
+*(.text.stub_clone)
+*(.text.strrchr)
+*(.text.strncpy)
+*(.text.stopmachine_set_state)
+*(.text.sock_sendmsg)
+*(.text.sock_release)
+*(.text.sock_fasync)
+*(.text.sock_close)
+*(.text.sk_stream_write_space)
+*(.text.sk_reset_timer)
+*(.text.skb_split)
+*(.text.skb_recv_datagram)
+*(.text.skb_queue_tail)
+*(.text.sk_attach_filter)
+*(.text.si_swapinfo)
+*(.text.simple_strtoll)
+*(.text.set_termios)
+*(.text.set_task_comm)
+*(.text.set_shrinker)
+*(.text.set_normalized_timespec)
+*(.text.set_brk)
+*(.text.serial_in)
+*(.text.seq_printf)
+*(.text.secure_dccp_sequence_number)
+*(.text.rwlock_bug)
+*(.text.rt_hash_code)
+*(.text.__rta_fill)
+*(.text.__request_resource)
+*(.text.relocate_new_kernel)
+*(.text.release_thread)
+*(.text.release_mem)
+*(.text.rb_prev)
+*(.text.rb_first)
+*(.text.random_poll)
+*(.text.__put_super_and_need_restart)
+*(.text.pty_write)
+*(.text.ptrace_stop)
+*(.text.proc_self_readlink)
+*(.text.proc_root_lookup)
+*(.text.proc_root_link)
+*(.text.proc_pid_make_inode)
+*(.text.proc_pid_attr_write)
+*(.text.proc_lookupfd)
+*(.text.proc_delete_inode)
+*(.text.posix_same_owner)
+*(.text.posix_block_lock)
+*(.text.poll_initwait)
+*(.text.pipe_write)
+*(.text.pipe_read_fasync)
+*(.text.pipe_ioctl)
+*(.text.pdflush)
+*(.text.pci_user_read_config_dword)
+*(.text.page_readlink)
+*(.text.null_lseek)
+*(.text.nf_hook_slow)
+*(.text.netlink_sock_destruct)
+*(.text.netlink_broadcast)
+*(.text.neigh_resolve_output)
+*(.text.name_to_int)
+*(.text.mwait_idle)
+*(.text.mutex_trylock)
+*(.text.mutex_debug_check_no_locks_held)
+*(.text.m_stop)
+*(.text.mpage_end_io_write)
+*(.text.mpage_alloc)
+*(.text.move_page_tables)
+*(.text.mounts_open)
+*(.text.__memset)
+*(.text.memcpy_fromiovec)
+*(.text.make_8259A_irq)
+*(.text.lookup_user_key_possessed)
+*(.text.lookup_create)
+*(.text.locks_insert_lock)
+*(.text.locks_alloc_lock)
+*(.text.kthread_should_stop)
+*(.text.kswapd)
+*(.text.kobject_uevent)
+*(.text.kobject_get_path)
+*(.text.kobject_get)
+*(.text.klist_children_put)
+*(.text.__ip_route_output_key)
+*(.text.ip_flush_pending_frames)
+*(.text.ip_compute_csum)
+*(.text.ip_append_data)
+*(.text.ioc_set_batching)
+*(.text.invalidate_inode_pages)
+*(.text.__invalidate_device)
+*(.text.install_arg_page)
+*(.text.in_sched_functions)
+*(.text.inotify_unmount_inodes)
+*(.text.init_once)
+*(.text.init_cdrom_command)
+*(.text.inet_stream_connect)
+*(.text.inet_sk_rebuild_header)
+*(.text.inet_csk_addr2sockaddr)
+*(.text.inet_create)
+*(.text.ifind)
+*(.text.ide_setup_dma)
+*(.text.ide_outsw)
+*(.text.ide_fixstring)
+*(.text.ide_dma_setup)
+*(.text.ide_cdrom_packet)
+*(.text.ide_cd_put)
+*(.text.ide_build_sglist)
+*(.text.i8259A_shutdown)
+*(.text.hung_up_tty_ioctl)
+*(.text.hrtimer_nanosleep)
+*(.text.hrtimer_init)
+*(.text.hrtimer_cancel)
+*(.text.hash_futex)
+*(.text.group_send_sig_info)
+*(.text.grab_cache_page_nowait)
+*(.text.get_wchan)
+*(.text.get_stack)
+*(.text.get_page_state)
+*(.text.getnstimeofday)
+*(.text.get_node)
+*(.text.get_kprobe)
+*(.text.generic_unplug_device)
+*(.text.free_task)
+*(.text.frag_show)
+*(.text.find_next_zero_string)
+*(.text.filp_open)
+*(.text.fillonedir)
+*(.text.exit_io_context)
+*(.text.exit_idle)
+*(.text.exact_lock)
+*(.text.eth_header)
+*(.text.dummy_unregister_security)
+*(.text.dummy_socket_post_create)
+*(.text.dummy_socket_listen)
+*(.text.dummy_quota_on)
+*(.text.dummy_inode_follow_link)
+*(.text.dummy_file_receive)
+*(.text.dummy_file_mprotect)
+*(.text.dummy_file_lock)
+*(.text.dummy_file_ioctl)
+*(.text.dummy_bprm_post_apply_creds)
+*(.text.do_writepages)
+*(.text.__down_interruptible)
+*(.text.do_notify_resume)
+*(.text.do_acct_process)
+*(.text.del_timer_sync)
+*(.text.default_rebuild_header)
+*(.text.d_callback)
+*(.text.dcache_readdir)
+*(.text.ctrl_dumpfamily)
+*(.text.cpuset_rmdir)
+*(.text.copy_strings_kernel)
+*(.text.con_write_room)
+*(.text.complete_all)
+*(.text.collect_sigign_sigcatch)
+*(.text.clear_user)
+*(.text.check_unthrottle)
+*(.text.cdrom_release)
+*(.text.cdrom_newpc_intr)
+*(.text.cdrom_ioctl)
+*(.text.cdrom_check_status)
+*(.text.cdev_put)
+*(.text.cdev_add)
+*(.text.cap_ptrace)
+*(.text.cap_bprm_secureexec)
+*(.text.cache_alloc_refill)
+*(.text.bmap)
+*(.text.blk_run_queue)
+*(.text.blk_queue_dma_alignment)
+*(.text.blk_ordered_req_seq)
+*(.text.blk_backing_dev_unplug)
+*(.text.__bitmap_subset)
+*(.text.__bitmap_and)
+*(.text.bio_unmap_user)
+*(.text.__bforget)
+*(.text.bd_forget)
+*(.text.bad_pipe_w)
+*(.text.bad_get_user)
+*(.text.audit_free)
+*(.text.anon_vma_ctor)
+*(.text.anon_pipe_buf_map)
+*(.text.alloc_sock_iocb)
+*(.text.alloc_fdset)
+*(.text.aio_kick_handler)
+*(.text.__add_entropy_words)
+*(.text.add_disk_randomness)
diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index 02fc7fa0ea28..6df05e6034fa 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -26,6 +26,7 @@
*/
.text
+ .section .bootstrap.text
.code32
.globl startup_32
/* %bx: 1 if coming from smp trampoline on secondary cpu */
@@ -192,7 +193,8 @@ startup_64:
movq initial_code(%rip),%rax
jmp *%rax
- /* SMP bootup changes these two */
+ /* SMP bootup changes these two */
+ .align 8
.globl initial_code
initial_code:
.quad x86_64_start_kernel
@@ -237,7 +239,7 @@ ENTRY(no_long_mode)
.org 0xf00
.globl pGDT32
pGDT32:
- .word gdt_end-cpu_gdt_table
+ .word gdt_end-cpu_gdt_table-1
.long cpu_gdt_table-__START_KERNEL_map
.org 0xf10
@@ -293,8 +295,6 @@ NEXT_PAGE(level2_kernel_pgt)
/* Module mapping starts here */
.fill 492,8,0
-NEXT_PAGE(empty_zero_page)
-
NEXT_PAGE(level3_physmem_pgt)
.quad phys_level2_kernel_pgt | 0x007 /* so that __va works even before pagetable_init */
.fill 511,8,0
@@ -337,7 +337,7 @@ ENTRY(boot_level4_pgt)
.align 16
.globl cpu_gdt_descr
cpu_gdt_descr:
- .word gdt_end-cpu_gdt_table
+ .word gdt_end-cpu_gdt_table-1
gdt:
.quad cpu_gdt_table
#ifdef CONFIG_SMP
@@ -352,7 +352,8 @@ gdt:
* Also sysret mandates a special GDT layout
*/
-.align PAGE_SIZE
+ .section .data.page_aligned, "aw"
+ .align PAGE_SIZE
/* The TLS descriptors are currently at a different place compared to i386.
Hopefully nobody expects them at a fixed place (Wine?) */
@@ -378,9 +379,12 @@ gdt_end:
/* zero the remaining page */
.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
-ENTRY(idt_table)
- .rept 256
- .quad 0
- .quad 0
- .endr
+ .section .bss, "aw", @nobits
+ .align L1_CACHE_BYTES
+ENTRY(idt_table)
+ .skip 256 * 16
+ .section .bss.page_aligned, "aw", @nobits
+ .align PAGE_SIZE
+ENTRY(empty_zero_page)
+ .skip PAGE_SIZE
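The repeated "-1" fixes in this file (and the matching idt_descr change in setup64.c below) follow from how lgdt/lidt pseudo-descriptors are defined: the 16-bit limit field holds the offset of the last valid byte, that is, the table size minus one. A small user-space sketch of the layout; the struct and numbers are illustrative, not the kernel's own definitions:

#include <stdio.h>

/* Illustrative pseudo-descriptor as loaded by lgdt/lidt: a 16-bit limit
 * followed by the linear base address, packed so the fields are adjacent. */
struct pseudo_desc {
        unsigned short limit;           /* offset of the last valid byte */
        unsigned long  base;            /* table address in real use */
} __attribute__((packed));

int main(void)
{
        unsigned long entries = 256;    /* e.g. 256 IDT gates */
        unsigned long entry_size = 16;  /* 16 bytes per 64-bit gate */

        struct pseudo_desc idt = {
                .limit = entries * entry_size - 1,   /* size - 1, not size */
                .base  = 0,
        };

        printf("limit=%u for a %lu byte table\n", idt.limit,
               entries * entry_size);
        return 0;
}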
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index ffed464e6b12..77b4c608cca0 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -50,7 +50,7 @@ static int no_timer_check;
int disable_timer_pin_1 __initdata;
-int timer_over_8254 __initdata = 1;
+int timer_over_8254 __initdata = 0;
/* Where if anywhere is the i8259 connect in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
@@ -310,7 +310,7 @@ void __init check_ioapic(void)
force_iommu) &&
!iommu_aperture_allowed) {
printk(KERN_INFO
- "Looks like a VIA chipset. Disabling IOMMU. Overwrite with \"iommu=allowed\"\n");
+ "Looks like a VIA chipset. Disabling IOMMU. Override with \"iommu=allowed\"\n");
iommu_aperture_disabled = 1;
}
#endif
@@ -1848,7 +1848,7 @@ static inline void check_timer(void)
*/
setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
if (timer_irq_works()) {
- printk("works.\n");
+ apic_printk(APIC_VERBOSE," works.\n");
nmi_watchdog_default();
if (nmi_watchdog == NMI_IO_APIC) {
setup_nmi();
@@ -1860,7 +1860,7 @@ static inline void check_timer(void)
*/
clear_IO_APIC_pin(apic2, pin2);
}
- printk(" failed.\n");
+ apic_printk(APIC_VERBOSE," failed.\n");
if (nmi_watchdog == NMI_IO_APIC) {
printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
@@ -1875,7 +1875,7 @@ static inline void check_timer(void)
enable_8259A_irq(0);
if (timer_irq_works()) {
- apic_printk(APIC_QUIET, " works.\n");
+ apic_printk(APIC_VERBOSE," works.\n");
return;
}
apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index b8b9529fa89e..04282ef9fbd4 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -139,8 +139,7 @@ static void mce_panic(char *msg, struct mce *backup, unsigned long start)
static int mce_available(struct cpuinfo_x86 *c)
{
- return test_bit(X86_FEATURE_MCE, &c->x86_capability) &&
- test_bit(X86_FEATURE_MCA, &c->x86_capability);
+ return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
}
static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 9013a90b5c2e..b17cf3eba359 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -106,11 +106,11 @@ static int __init mpf_checksum(unsigned char *mp, int len)
return sum & 0xFF;
}
-static void __init MP_processor_info (struct mpc_config_processor *m)
+static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
{
int cpu;
unsigned char ver;
- static int found_bsp=0;
+ cpumask_t tmp_map;
if (!(m->mpc_cpuflag & CPU_ENABLED)) {
disabled_cpus++;
@@ -133,8 +133,10 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
return;
}
- cpu = num_processors++;
-
+ num_processors++;
+ cpus_complement(tmp_map, cpu_present_map);
+ cpu = first_cpu(tmp_map);
+
#if MAX_APICS < 255
if ((int)m->mpc_apicid > MAX_APICS) {
printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
@@ -160,12 +162,7 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
* entry is BSP, and so on.
*/
cpu = 0;
-
- bios_cpu_apicid[0] = m->mpc_apicid;
- x86_cpu_to_apicid[0] = m->mpc_apicid;
- found_bsp = 1;
- } else
- cpu = num_processors - found_bsp;
+ }
bios_cpu_apicid[cpu] = m->mpc_apicid;
x86_cpu_to_apicid[cpu] = m->mpc_apicid;
@@ -691,7 +688,7 @@ void __init mp_register_lapic_address (
}
-void __init mp_register_lapic (
+void __cpuinit mp_register_lapic (
u8 id,
u8 enabled)
{
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 66c009e10bac..d9e4067faf05 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -534,6 +534,7 @@ asmlinkage __kprobes void do_nmi(struct pt_regs * regs, long error_code)
void set_nmi_callback(nmi_callback_t callback)
{
+ vmalloc_sync_all();
rcu_assign_pointer(nmi_callback, callback);
}
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index 4ed391edd47a..03c9eeedb0f3 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -73,6 +73,9 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
if (dma_mask == 0)
dma_mask = 0xffffffff;
+ /* Don't invoke OOM killer */
+ gfp |= __GFP_NORETRY;
+
/* Kludge to make it bug-to-bug compatible with i386. i386
uses the normal dma_mask for alloc_coherent. */
dma_mask &= *dev->dma_mask;
diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c
index 0c3f052ba6ce..a6c01e121266 100644
--- a/arch/x86_64/kernel/pci-gart.c
+++ b/arch/x86_64/kernel/pci-gart.c
@@ -65,9 +65,7 @@ static u32 gart_unmapped_entry;
#define for_all_nb(dev) \
dev = NULL; \
- while ((dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)\
- if (dev->bus->number == 0 && \
- (PCI_SLOT(dev->devfn) >= 24) && (PCI_SLOT(dev->devfn) <= 31))
+ while ((dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)
static struct pci_dev *northbridges[MAX_NB];
static u32 northbridge_flush_word[MAX_NB];
@@ -148,9 +146,12 @@ static void flush_gart(struct device *dev)
if (!northbridges[i])
continue;
/* Make sure the hardware actually executed the flush. */
- do {
+ for (;;) {
pci_read_config_dword(northbridges[i], 0x9c, &w);
- } while (w & 1);
+ if (!(w & 1))
+ break;
+ cpu_relax();
+ }
}
if (!flushed)
printk("nothing to flush?\n");
diff --git a/arch/x86_64/kernel/pmtimer.c b/arch/x86_64/kernel/pmtimer.c
index 5c51d10408a6..ee5ee4891f3d 100644
--- a/arch/x86_64/kernel/pmtimer.c
+++ b/arch/x86_64/kernel/pmtimer.c
@@ -86,7 +86,7 @@ static unsigned pmtimer_wait_tick(void)
for (a = b = inl(pmtmr_ioport) & ACPI_PM_MASK;
a == b;
b = inl(pmtmr_ioport) & ACPI_PM_MASK)
- ;
+ cpu_relax();
return b;
}
@@ -97,6 +97,7 @@ void pmtimer_wait(unsigned us)
a = pmtimer_wait_tick();
do {
b = inl(pmtmr_ioport);
+ cpu_relax();
} while (cyc2us(b - a) < us);
}
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 22a05dec81a2..81111835722d 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -114,7 +114,7 @@ void exit_idle(void)
* We use this if we don't have any better
* idle routine..
*/
-void default_idle(void)
+static void default_idle(void)
{
local_irq_enable();
@@ -508,7 +508,7 @@ out:
/*
* This special macro can be used to load a debugging register
*/
-#define loaddebug(thread,r) set_debug(thread->debugreg ## r, r)
+#define loaddebug(thread,r) set_debugreg(thread->debugreg ## r, r)
/*
* switch_to(x,y) should switch tasks from x to y.
@@ -527,8 +527,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(init_tss, cpu);
- unlazy_fpu(prev_p);
-
/*
* Reload esp0, LDT and the page table pointer:
*/
@@ -586,11 +584,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
}
/*
- * Switch the PDA context.
+ * Switch the PDA and FPU contexts.
*/
prev->userrsp = read_pda(oldrsp);
write_pda(oldrsp, next->userrsp);
write_pda(pcurrent, next_p);
+ /* This must be here to ensure both math_state_restore() and
+ kernel_fpu_begin() work consistently. */
+ unlazy_fpu(prev_p);
write_pda(kernelstack,
task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c
index 53205622351c..d44b2c1e63a6 100644
--- a/arch/x86_64/kernel/ptrace.c
+++ b/arch/x86_64/kernel/ptrace.c
@@ -420,9 +420,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case offsetof(struct user, u_debugreg[7]):
/* See arch/i386/kernel/ptrace.c for an explanation of
* this awkward check.*/
- data &= ~DR_CONTROL_RESERVED;
- for(i=0; i<4; i++)
- if ((0x5454 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
+ data &= ~DR_CONTROL_RESERVED;
+ for(i=0; i<4; i++)
+ if ((0x5554 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
break;
if (i == 4) {
child->thread.debugreg7 = data;
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index aa55e3cec665..a57eec8311a7 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -46,6 +46,7 @@
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
+#include <linux/ctype.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
@@ -67,6 +68,7 @@
#include <asm/swiotlb.h>
#include <asm/sections.h>
#include <asm/gart-mapping.h>
+#include <asm/dmi.h>
/*
* Machine setup..
@@ -91,6 +93,12 @@ int bootloader_type;
unsigned long saved_video_mode;
+/*
+ * Early DMI memory
+ */
+int dmi_alloc_index;
+char dmi_alloc_data[DMI_MAX_DATA];
+
/*
* Setup options
*/
@@ -270,6 +278,13 @@ static void __init probe_roms(void)
}
}
+/* Check for full argument with no trailing characters */
+static int fullarg(char *p, char *arg)
+{
+ int l = strlen(arg);
+ return !memcmp(p, arg, l) && (p[l] == 0 || isspace(p[l]));
+}
+
static __init void parse_cmdline_early (char ** cmdline_p)
{
char c = ' ', *to = command_line, *from = COMMAND_LINE;
@@ -293,10 +308,10 @@ static __init void parse_cmdline_early (char ** cmdline_p)
#endif
#ifdef CONFIG_ACPI
/* "acpi=off" disables both ACPI table parsing and interpreter init */
- if (!memcmp(from, "acpi=off", 8))
+ if (fullarg(from,"acpi=off"))
disable_acpi();
- if (!memcmp(from, "acpi=force", 10)) {
+ if (fullarg(from, "acpi=force")) {
/* add later when we do DMI horrors: */
acpi_force = 1;
acpi_disabled = 0;
@@ -304,52 +319,47 @@ static __init void parse_cmdline_early (char ** cmdline_p)
/* acpi=ht just means: do ACPI MADT parsing
at bootup, but don't enable the full ACPI interpreter */
- if (!memcmp(from, "acpi=ht", 7)) {
+ if (fullarg(from, "acpi=ht")) {
if (!acpi_force)
disable_acpi();
acpi_ht = 1;
}
- else if (!memcmp(from, "pci=noacpi", 10))
+ else if (fullarg(from, "pci=noacpi"))
acpi_disable_pci();
- else if (!memcmp(from, "acpi=noirq", 10))
+ else if (fullarg(from, "acpi=noirq"))
acpi_noirq_set();
- else if (!memcmp(from, "acpi_sci=edge", 13))
+ else if (fullarg(from, "acpi_sci=edge"))
acpi_sci_flags.trigger = 1;
- else if (!memcmp(from, "acpi_sci=level", 14))
+ else if (fullarg(from, "acpi_sci=level"))
acpi_sci_flags.trigger = 3;
- else if (!memcmp(from, "acpi_sci=high", 13))
+ else if (fullarg(from, "acpi_sci=high"))
acpi_sci_flags.polarity = 1;
- else if (!memcmp(from, "acpi_sci=low", 12))
+ else if (fullarg(from, "acpi_sci=low"))
acpi_sci_flags.polarity = 3;
/* acpi=strict disables out-of-spec workarounds */
- else if (!memcmp(from, "acpi=strict", 11)) {
+ else if (fullarg(from, "acpi=strict")) {
acpi_strict = 1;
}
#ifdef CONFIG_X86_IO_APIC
- else if (!memcmp(from, "acpi_skip_timer_override", 24))
+ else if (fullarg(from, "acpi_skip_timer_override"))
acpi_skip_timer_override = 1;
#endif
#endif
- if (!memcmp(from, "disable_timer_pin_1", 19))
+ if (fullarg(from, "disable_timer_pin_1"))
disable_timer_pin_1 = 1;
- if (!memcmp(from, "enable_timer_pin_1", 18))
+ if (fullarg(from, "enable_timer_pin_1"))
disable_timer_pin_1 = -1;
- if (!memcmp(from, "nolapic", 7) ||
- !memcmp(from, "disableapic", 11))
+ if (fullarg(from, "nolapic") || fullarg(from, "disableapic"))
disable_apic = 1;
- /* Don't confuse with noapictimer */
- if (!memcmp(from, "noapic", 6) &&
- (from[6] == ' ' || from[6] == 0))
+ if (fullarg(from, "noapic"))
skip_ioapic_setup = 1;
- /* Make sure to not confuse with apic= */
- if (!memcmp(from, "apic", 4) &&
- (from[4] == ' ' || from[4] == 0)) {
+ if (fullarg(from,"apic")) {
skip_ioapic_setup = 0;
ioapic_force = 1;
}
@@ -388,7 +398,7 @@ static __init void parse_cmdline_early (char ** cmdline_p)
iommu_setup(from+6);
}
- if (!memcmp(from,"oops=panic", 10))
+ if (fullarg(from,"oops=panic"))
panic_on_oops = 1;
if (!memcmp(from, "noexec=", 7))
@@ -611,11 +621,14 @@ void __init setup_arch(char **cmdline_p)
* we are rounding upwards:
*/
end_pfn = e820_end_of_ram();
+ num_physpages = end_pfn; /* for pfn_valid */
check_efer();
init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
+ dmi_scan_machine();
+
zap_low_mappings(0);
#ifdef CONFIG_ACPI
@@ -708,6 +721,12 @@ void __init setup_arch(char **cmdline_p)
check_ioapic();
+ /*
+ * set this early, so we dont allocate cpu0
+ * if MADT list doesnt list BSP first
+ * mpparse.c/MP_processor_info() allocates logical cpu numbers.
+ */
+ cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
/*
* Read APIC and some other early information from ACPI tables.
@@ -836,7 +855,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
unsigned bits;
#ifdef CONFIG_NUMA
int node = 0;
- unsigned apicid = phys_proc_id[cpu];
+ unsigned apicid = hard_smp_processor_id();
#endif
bits = 0;
@@ -846,7 +865,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
/* Low order bits define the core id (index of core in socket) */
cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
/* Convert the APIC ID into the socket ID */
- phys_proc_id[cpu] >>= bits;
+ phys_proc_id[cpu] = phys_pkg_id(bits);
#ifdef CONFIG_NUMA
node = phys_proc_id[cpu];
@@ -872,8 +891,8 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
}
numa_set_node(cpu, node);
- printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
- cpu, c->x86_max_cores, node, cpu_core_id[cpu]);
+ printk(KERN_INFO "CPU %d/%x(%d) -> Node %d -> Core %d\n",
+ cpu, apicid, c->x86_max_cores, node, cpu_core_id[cpu]);
#endif
#endif
}
@@ -927,8 +946,6 @@ static int __init init_amd(struct cpuinfo_x86 *c)
if (c->extended_cpuid_level >= 0x80000008) {
c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
- if (c->x86_max_cores & (c->x86_max_cores - 1))
- c->x86_max_cores = 1;
amd_detect_cmp(c);
}
@@ -1261,7 +1278,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* Intel-defined (#2) */
- "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est",
+ "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
@@ -1344,8 +1361,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
{
int i;
for ( i = 0 ; i < 32*NCAPINTS ; i++ )
- if ( test_bit(i, &c->x86_capability) &&
- x86_cap_flags[i] != NULL )
+ if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
seq_printf(m, " %s", x86_cap_flags[i]);
}
@@ -1403,10 +1419,3 @@ struct seq_operations cpuinfo_op = {
.show = show_cpuinfo,
};
-static int __init run_dmi_scan(void)
-{
- dmi_scan_machine();
- return 0;
-}
-core_initcall(run_dmi_scan);
-
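fullarg(), added near the top of this file's diff, matches an option only when it is followed by whitespace or the end of the string, which replaces the hand-rolled checks that kept "noapic" from matching "noapictimer" and "apic" from matching "apic=". A stand-alone sketch of the same test, with made-up command lines:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Same shape as the helper in the patch: prefix match plus a check that
 * nothing but whitespace or end-of-string follows the option name. */
static int fullarg(const char *p, const char *arg)
{
        int l = strlen(arg);
        return !memcmp(p, arg, l) && (p[l] == 0 || isspace((unsigned char)p[l]));
}

int main(void)
{
        printf("%d\n", fullarg("apic root=/dev/sda1", "apic")); /* 1: full match */
        printf("%d\n", fullarg("apic=verbose", "apic"));        /* 0: has '=' */
        printf("%d\n", fullarg("noapictimer", "noapic"));       /* 0: longer word */
        return 0;
}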
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 70f1bb808a20..eabdb63fec31 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -33,7 +33,7 @@ cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
-struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table };
+struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
@@ -59,7 +59,7 @@ int __init nonx_setup(char *str)
}
__setup("noexec=", nonx_setup); /* parsed early actually */
-int force_personality32 = READ_IMPLIES_EXEC;
+int force_personality32 = 0;
/* noexec32=on|off
Control non executable heap for 32bit processes.
@@ -248,7 +248,7 @@ void __cpuinit cpu_init (void)
switch (v + 1) {
#if DEBUG_STKSZ > EXCEPTION_STKSZ
case DEBUG_STACK:
- cpu_pda[cpu].debugstack = (unsigned long)estacks;
+ cpu_pda(cpu)->debugstack = (unsigned long)estacks;
estacks += DEBUG_STKSZ;
break;
#endif
@@ -281,12 +281,12 @@ void __cpuinit cpu_init (void)
* Clear all 6 debug registers:
*/
- set_debug(0UL, 0);
- set_debug(0UL, 1);
- set_debug(0UL, 2);
- set_debug(0UL, 3);
- set_debug(0UL, 6);
- set_debug(0UL, 7);
+ set_debugreg(0UL, 0);
+ set_debugreg(0UL, 1);
+ set_debugreg(0UL, 2);
+ set_debugreg(0UL, 3);
+ set_debugreg(0UL, 6);
+ set_debugreg(0UL, 7);
fpu_init();
}
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 19ef012b1f17..4a6628b14d99 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -75,7 +75,7 @@ static inline void leave_mm(int cpu)
{
if (read_pda(mmu_state) == TLBSTATE_OK)
BUG();
- clear_bit(cpu, &read_pda(active_mm)->cpu_vm_mask);
+ cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
load_cr3(swapper_pg_dir);
}
@@ -85,7 +85,7 @@ static inline void leave_mm(int cpu)
* [cpu0: the cpu that switches]
* 1) switch_mm() either 1a) or 1b)
* 1a) thread switch to a different mm
- * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
+ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
* Stop ipi delivery for the old mm. This is not synchronized with
* the other cpus, but smp_invalidate_interrupt ignore flush ipis
* for the wrong mm, and in the worst case we perform a superfluous
@@ -95,7 +95,7 @@ static inline void leave_mm(int cpu)
* was in lazy tlb mode.
* 1a3) update cpu active_mm
* Now cpu0 accepts tlb flushes for the new mm.
- * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask);
+ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
* Now the other cpus will send tlb flush ipis.
* 1a4) change cr3.
* 1b) thread switch without mm change
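The smp.c hunks switch from raw clear_bit()/set_bit() on the address of a cpumask_t to the cpu_clear()/cpu_set() accessors, which know that a cpumask is an array of longs rather than a single word. Roughly, with simplified user-space stand-ins for the kernel macros:

#include <stdio.h>

#define EXAMPLE_NR_CPUS  128
#define BITS_PER_LONG    (8 * sizeof(unsigned long))
#define LONGS_FOR(n)     (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Simplified cpumask: an array of longs, like the kernel's cpumask_t. */
typedef struct {
        unsigned long bits[LONGS_FOR(EXAMPLE_NR_CPUS)];
} example_cpumask_t;

/* The accessors pick the right word; callers never touch .bits directly. */
static void example_cpu_set(int cpu, example_cpumask_t *mask)
{
        mask->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

static void example_cpu_clear(int cpu, example_cpumask_t *mask)
{
        mask->bits[cpu / BITS_PER_LONG] &= ~(1UL << (cpu % BITS_PER_LONG));
}

int main(void)
{
        example_cpumask_t vm_mask = { { 0 } };

        example_cpu_set(70, &vm_mask);       /* lands in bits[1] on 64-bit */
        example_cpu_clear(70, &vm_mask);
        printf("%lx %lx\n", vm_mask.bits[0], vm_mask.bits[1]);
        return 0;
}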
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index ee5ce3d3cbc3..7f58fa682491 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -88,7 +88,8 @@ static inline unsigned int do_gettimeoffset_tsc(void)
unsigned long t;
unsigned long x;
t = get_cycles_sync();
- if (t < vxtime.last_tsc) t = vxtime.last_tsc; /* hack */
+ if (t < vxtime.last_tsc)
+ t = vxtime.last_tsc; /* hack */
x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
return x;
}
@@ -178,8 +179,9 @@ unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
- /* Assume the lock function has either no stack frame or only a single word.
- This checks if the address on the stack looks like a kernel text address.
+ /* Assume the lock function has either no stack frame or only a single
+ word. This checks if the address on the stack looks like a kernel
+ text address.
There is a small window for false hits, but in that case the tick
is just accounted to the spinlock function.
Better would be to write these functions in assembler again
@@ -242,17 +244,10 @@ static void set_rtc_mmss(unsigned long nowtime)
real_minutes += 30; /* correct for half hour time zone */
real_minutes %= 60;
-#if 0
- /* AMD 8111 is a really bad time keeper and hits this regularly.
- It probably was an attempt to avoid screwing up DST, but ignore
- that for now. */
if (abs(real_minutes - cmos_minutes) >= 30) {
printk(KERN_WARNING "time.c: can't update CMOS clock "
"from %d to %d\n", cmos_minutes, real_minutes);
- } else
-#endif
-
- {
+ } else {
BIN_TO_BCD(real_seconds);
BIN_TO_BCD(real_minutes);
CMOS_WRITE(real_seconds, RTC_SECONDS);
@@ -293,8 +288,7 @@ unsigned long long monotonic_clock(void)
this_offset = hpet_readl(HPET_COUNTER);
} while (read_seqretry(&xtime_lock, seq));
offset = (this_offset - last_offset);
- offset *=(NSEC_PER_SEC/HZ)/hpet_tick;
- return base + offset;
+ offset *= (NSEC_PER_SEC/HZ) / hpet_tick;
} else {
do {
seq = read_seqbegin(&xtime_lock);
@@ -303,50 +297,46 @@ unsigned long long monotonic_clock(void)
base = monotonic_base;
} while (read_seqretry(&xtime_lock, seq));
this_offset = get_cycles_sync();
- offset = (this_offset - last_offset)*1000/cpu_khz;
- return base + offset;
+ offset = (this_offset - last_offset)*1000 / cpu_khz;
}
+ return base + offset;
}
EXPORT_SYMBOL(monotonic_clock);
static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
{
- static long lost_count;
- static int warned;
-
- if (report_lost_ticks) {
- printk(KERN_WARNING "time.c: Lost %d timer "
- "tick(s)! ", lost);
- print_symbol("rip %s)\n", regs->rip);
- }
-
- if (lost_count == 1000 && !warned) {
- printk(KERN_WARNING
- "warning: many lost ticks.\n"
- KERN_WARNING "Your time source seems to be instable or "
+ static long lost_count;
+ static int warned;
+ if (report_lost_ticks) {
+ printk(KERN_WARNING "time.c: Lost %d timer tick(s)! ", lost);
+ print_symbol("rip %s)\n", regs->rip);
+ }
+
+ if (lost_count == 1000 && !warned) {
+ printk(KERN_WARNING "warning: many lost ticks.\n"
+ KERN_WARNING "Your time source seems to be instable or "
"some driver is hogging interupts\n");
- print_symbol("rip %s\n", regs->rip);
- if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
- printk(KERN_WARNING "Falling back to HPET\n");
- if (hpet_use_timer)
- vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
- else
- vxtime.last = hpet_readl(HPET_COUNTER);
- vxtime.mode = VXTIME_HPET;
- do_gettimeoffset = do_gettimeoffset_hpet;
- }
- /* else should fall back to PIT, but code missing. */
- warned = 1;
- } else
- lost_count++;
+ print_symbol("rip %s\n", regs->rip);
+ if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
+ printk(KERN_WARNING "Falling back to HPET\n");
+ if (hpet_use_timer)
+ vxtime.last = hpet_readl(HPET_T0_CMP) -
+ hpet_tick;
+ else
+ vxtime.last = hpet_readl(HPET_COUNTER);
+ vxtime.mode = VXTIME_HPET;
+ do_gettimeoffset = do_gettimeoffset_hpet;
+ }
+ /* else should fall back to PIT, but code missing. */
+ warned = 1;
+ } else
+ lost_count++;
#ifdef CONFIG_CPU_FREQ
- /* In some cases the CPU can change frequency without us noticing
- (like going into thermal throttle)
- Give cpufreq a change to catch up. */
- if ((lost_count+1) % 25 == 0) {
- cpufreq_delayed_get();
- }
+ /* In some cases the CPU can change frequency without us noticing
+ Give cpufreq a change to catch up. */
+ if ((lost_count+1) % 25 == 0)
+ cpufreq_delayed_get();
#endif
}
@@ -354,7 +344,7 @@ void main_timer_handler(struct pt_regs *regs)
{
static unsigned long rtc_update = 0;
unsigned long tsc;
- int delay, offset = 0, lost = 0;
+ int delay = 0, offset = 0, lost = 0;
/*
* Here we are in the timer irq handler. We have irqs locally disabled (so we
@@ -375,7 +365,7 @@ void main_timer_handler(struct pt_regs *regs)
*/
offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
delay = hpet_readl(HPET_COUNTER) - offset;
- } else {
+ } else if (!pmtmr_ioport) {
spin_lock(&i8253_lock);
outb_p(0x00, 0x43);
delay = inb_p(0x40);
@@ -517,6 +507,7 @@ static unsigned long get_cmos_time(void)
unsigned int timeout = 1000000, year, mon, day, hour, min, sec;
unsigned char uip = 0, this = 0;
unsigned long flags;
+ unsigned extyear = 0;
/*
* The Linux interpretation of the CMOS clock register contents: When the
@@ -545,12 +536,17 @@ static unsigned long get_cmos_time(void)
mon = CMOS_READ(RTC_MONTH);
year = CMOS_READ(RTC_YEAR);
+#ifdef CONFIG_ACPI
+ if (acpi_fadt.revision >= FADT2_REVISION_ID && acpi_fadt.century)
+ extyear = CMOS_READ(acpi_fadt.century);
+#endif
+
spin_unlock_irqrestore(&rtc_lock, flags);
/*
* We know that x86-64 always uses BCD format, no need to check the
* config register.
- */
+ */
BCD_TO_BIN(sec);
BCD_TO_BIN(min);
@@ -559,11 +555,17 @@ static unsigned long get_cmos_time(void)
BCD_TO_BIN(mon);
BCD_TO_BIN(year);
- /*
- * x86-64 systems only exists since 2002.
- * This will work up to Dec 31, 2100
- */
- year += 2000;
+ if (extyear) {
+ BCD_TO_BIN(extyear);
+ year += extyear;
+ printk(KERN_INFO "Extended CMOS year: %d\n", extyear);
+ } else {
+ /*
+ * x86-64 systems only exists since 2002.
+ * This will work up to Dec 31, 2100
+ */
+ year += 2000;
+ }
return mktime(year, mon, day, hour, min, sec);
}
@@ -606,7 +608,8 @@ static void cpufreq_delayed_get(void)
cpufreq_delayed_issched = 1;
if (!warned) {
warned = 1;
- printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
+ printk(KERN_DEBUG
+ "Losing some ticks... checking if CPU frequency changed.\n");
}
schedule_work(&cpufreq_delayed_get_work);
}
@@ -629,9 +632,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
lpj = &dummy;
if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
- lpj = &cpu_data[freq->cpu].loops_per_jiffy;
+ lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
- lpj = &boot_cpu_data.loops_per_jiffy;
+ lpj = &boot_cpu_data.loops_per_jiffy;
#endif
if (!ref_freq) {
@@ -768,9 +771,8 @@ static __init int late_hpet_init(void)
int i;
hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);
-
- for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
- timer++, i++)
+ timer = &hpet->hpet_timers[2];
+ for (i = 2; i < ntimer; timer++, i++)
hd.hd_irq[i] = (timer->hpet_config &
Tn_INT_ROUTE_CNF_MASK) >>
Tn_INT_ROUTE_CNF_SHIFT;
@@ -927,8 +929,7 @@ void __init time_init(void)
-xtime.tv_sec, -xtime.tv_nsec);
if (!hpet_init())
- vxtime_hz = (1000000000000000L + hpet_period / 2) /
- hpet_period;
+ vxtime_hz = (1000000000000000L + hpet_period / 2) / hpet_period;
else
vxtime.hpet_address = 0;
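get_cmos_time() now reads a FADT-described century register when one is present instead of hard-coding the year-2000 offset, and all RTC fields stay in BCD until converted. A stand-alone sketch of the BCD decode and the four-digit year being assembled; the register contents below are invented:

#include <stdio.h>

/* Classic BCD decode: low nibble is ones, high nibble is tens. */
static unsigned int bcd_to_bin(unsigned int val)
{
        return (val & 0x0f) + (val >> 4) * 10;
}

int main(void)
{
        unsigned int year_reg    = 0x06;  /* invented RTC_YEAR contents (BCD) */
        unsigned int century_reg = 0x20;  /* invented century register (BCD) */

        unsigned int year = bcd_to_bin(year_reg);

        if (century_reg)                  /* century register advertised */
                year += bcd_to_bin(century_reg) * 100;
        else
                year += 2000;             /* the old hard-coded fallback */

        printf("year = %u\n", year);      /* prints 2006 */
        return 0;
}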
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 28d50dc540e8..7b148309c529 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -47,8 +47,6 @@
#include <asm/proto.h>
#include <asm/nmi.h>
-extern struct gate_struct idt_table[256];
-
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
@@ -78,6 +76,8 @@ int register_die_notifier(struct notifier_block *nb)
{
int err = 0;
unsigned long flags;
+
+ vmalloc_sync_all();
spin_lock_irqsave(&die_notifier_lock, flags);
err = notifier_chain_register(&die_chain, nb);
spin_unlock_irqrestore(&die_notifier_lock, flags);
@@ -122,7 +122,7 @@ int printk_address(unsigned long address)
if (!modname)
modname = delim = "";
return printk("<%016lx>{%s%s%s%s%+ld}",
- address,delim,modname,delim,symname,offset);
+ address, delim, modname, delim, symname, offset);
}
#else
int printk_address(unsigned long address)
@@ -334,13 +334,12 @@ void show_registers(struct pt_regs *regs)
show_stack(NULL, (unsigned long*)rsp);
printk("\nCode: ");
- if(regs->rip < PAGE_OFFSET)
+ if (regs->rip < PAGE_OFFSET)
goto bad;
- for(i=0;i<20;i++)
- {
+ for (i=0; i<20; i++) {
unsigned char c;
- if(__get_user(c, &((unsigned char*)regs->rip)[i])) {
+ if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
bad:
printk(" Bad RIP value.");
break;
@@ -479,7 +478,7 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
printk(KERN_INFO
"%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
tsk->comm, tsk->pid, str,
- regs->rip,regs->rsp,error_code);
+ regs->rip, regs->rsp, error_code);
if (info)
force_sig_info(signr, info, tsk);
@@ -493,9 +492,9 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
{
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->rip);
- if (fixup) {
+ if (fixup)
regs->rip = fixup->fixup;
- } else
+ else
die(str, regs, error_code);
return;
}
@@ -568,7 +567,7 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
printk(KERN_INFO
"%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
tsk->comm, tsk->pid,
- regs->rip,regs->rsp,error_code);
+ regs->rip, regs->rsp, error_code);
force_sig(SIGSEGV, tsk);
return;
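
A hypothetical module-side sketch of the register_die_notifier() path touched above: registration now runs vmalloc_sync_all() before taking the notifier lock, so notifier data living in module (vmalloc) space is reachable from every page table before a fault or NMI walks the chain. The my_die_* names are made up for illustration.

#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

static int my_die_handler(struct notifier_block *nb, unsigned long val,
			  void *data)
{
	/* Inspect the die args in 'data' if interested; stay passive here. */
	return NOTIFY_DONE;
}

static struct notifier_block my_die_nb = {
	.notifier_call = my_die_handler,
};

static int __init my_init(void)
{
	return register_die_notifier(&my_die_nb);	/* syncs vmalloc PGDs first */
}
module_init(my_init);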
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 74db0062d4a2..39ff0708f803 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -20,6 +20,12 @@ SECTIONS
phys_startup_64 = startup_64 - LOAD_OFFSET;
_text = .; /* Text and read-only data */
.text : AT(ADDR(.text) - LOAD_OFFSET) {
+ /* First the code that has to be first for bootstrapping */
+ *(.bootstrap.text)
+ /* Then all the functions that are "hot" in profiles, to group them
+ onto the same hugetlb entry */
+ #include "functionlist"
+ /* Then the rest */
*(.text)
SCHED_TEXT
LOCK_TEXT
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index c9dc7e46731e..d96a9348e5a2 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -144,16 +144,12 @@ EXPORT_SYMBOL(rwsem_down_write_failed_thunk);
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(die_chain);
-EXPORT_SYMBOL(register_die_notifier);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(smp_num_siblings);
#endif
-extern void do_softirq_thunk(void);
-EXPORT_SYMBOL(do_softirq_thunk);
-
#ifdef CONFIG_BUG
EXPORT_SYMBOL(out_of_line_bug);
#endif
diff --git a/arch/x86_64/lib/thunk.S b/arch/x86_64/lib/thunk.S
index acc1e2ca7ed7..e49af0032e94 100644
--- a/arch/x86_64/lib/thunk.S
+++ b/arch/x86_64/lib/thunk.S
@@ -42,7 +42,6 @@
thunk rwsem_wake_thunk,rwsem_wake
thunk rwsem_downgrade_thunk,rwsem_downgrade_wake
#endif
- thunk do_softirq_thunk,do_softirq
thunk __down_failed,__down
thunk_retrax __down_failed_interruptible,__down_interruptible
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index 2e7c3c8ffe03..316c53de47bd 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -264,6 +264,8 @@ static int vmalloc_fault(unsigned long address)
return -1;
if (pgd_none(*pgd))
set_pgd(pgd, *pgd_ref);
+ else
+ BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
/* Below here mismatches are bugs because these lower tables
are shared */
@@ -312,21 +314,13 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
unsigned long flags;
siginfo_t info;
+ tsk = current;
+ mm = tsk->mm;
+ prefetchw(&mm->mmap_sem);
+
/* get the address */
__asm__("movq %%cr2,%0":"=r" (address));
- if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
- SIGSEGV) == NOTIFY_STOP)
- return;
-
- if (likely(regs->eflags & X86_EFLAGS_IF))
- local_irq_enable();
- if (unlikely(page_fault_trace))
- printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
- regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
-
- tsk = current;
- mm = tsk->mm;
info.si_code = SEGV_MAPERR;
@@ -351,10 +345,12 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
*/
if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
((address >= VMALLOC_START && address < VMALLOC_END))) {
- if (vmalloc_fault(address) < 0)
- goto bad_area_nosemaphore;
- return;
+ if (vmalloc_fault(address) >= 0)
+ return;
}
+ if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ SIGSEGV) == NOTIFY_STOP)
+ return;
/*
* Don't take the mm semaphore here. If we fixup a prefetch
* fault we could otherwise deadlock.
@@ -362,6 +358,17 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
goto bad_area_nosemaphore;
}
+ if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ SIGSEGV) == NOTIFY_STOP)
+ return;
+
+ if (likely(regs->eflags & X86_EFLAGS_IF))
+ local_irq_enable();
+
+ if (unlikely(page_fault_trace))
+ printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
+ regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
+
if (unlikely(error_code & PF_RSVD))
pgtable_bad(address, regs, error_code);
@@ -571,6 +578,48 @@ do_sigbus:
return;
}
+DEFINE_SPINLOCK(pgd_lock);
+struct page *pgd_list;
+
+void vmalloc_sync_all(void)
+{
+ /* Note that races in the updates of insync and start aren't
+ problematic:
+ insync can only get set bits added, and updates to start are only
+ improving performance (without affecting correctness if undone). */
+ static DECLARE_BITMAP(insync, PTRS_PER_PGD);
+ static unsigned long start = VMALLOC_START & PGDIR_MASK;
+ unsigned long address;
+
+ for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
+ if (!test_bit(pgd_index(address), insync)) {
+ const pgd_t *pgd_ref = pgd_offset_k(address);
+ struct page *page;
+
+ if (pgd_none(*pgd_ref))
+ continue;
+ spin_lock(&pgd_lock);
+ for (page = pgd_list; page;
+ page = (struct page *)page->index) {
+ pgd_t *pgd;
+ pgd = (pgd_t *)page_address(page) + pgd_index(address);
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
+ else
+ BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
+ }
+ spin_unlock(&pgd_lock);
+ set_bit(pgd_index(address), insync);
+ }
+ if (address == start)
+ start = address + PGDIR_SIZE;
+ }
+ /* Check that there is no need to do the same for the modules area. */
+ BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
+ BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
+ (__START_KERNEL & PGDIR_MASK)));
+}
+
static int __init enable_pagefaulttrace(char *str)
{
page_fault_trace = 1;
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 40ed13d263cd..b04415625442 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -94,7 +94,7 @@ void show_mem(void)
int after_bootmem;
-static void *spp_getpage(void)
+static __init void *spp_getpage(void)
{
void *ptr;
if (after_bootmem)
@@ -108,7 +108,7 @@ static void *spp_getpage(void)
return ptr;
}
-static void set_pte_phys(unsigned long vaddr,
+static __init void set_pte_phys(unsigned long vaddr,
unsigned long phys, pgprot_t prot)
{
pgd_t *pgd;
@@ -157,7 +157,8 @@ static void set_pte_phys(unsigned long vaddr,
}
/* NOTE: this is meant to be run only at boot */
-void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
+void __init
+__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
unsigned long address = __fix_to_virt(idx);
@@ -225,6 +226,33 @@ static __meminit void unmap_low_page(int i)
ti->allocated = 0;
}
+/* Must run before zap_low_mappings */
+__init void *early_ioremap(unsigned long addr, unsigned long size)
+{
+ unsigned long map = round_down(addr, LARGE_PAGE_SIZE);
+
+ /* actually usually some more */
+ if (size >= LARGE_PAGE_SIZE) {
+ printk("SMBIOS area too long %lu\n", size);
+ return NULL;
+ }
+ set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
+ map += LARGE_PAGE_SIZE;
+ set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
+ __flush_tlb();
+ return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
+}
+
+/* To avoid virtual aliases later */
+__init void early_iounmap(void *addr, unsigned long size)
+{
+ if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
+ printk("early_iounmap: bad address %p\n", addr);
+ set_pmd(temp_mappings[0].pmd, __pmd(0));
+ set_pmd(temp_mappings[1].pmd, __pmd(0));
+ __flush_tlb();
+}
+
static void __meminit
phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
{
@@ -344,7 +372,7 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end)
pud_t *pud;
if (after_bootmem)
- pud = pud_offset_k(pgd, __PAGE_OFFSET);
+ pud = pud_offset_k(pgd, start & PGDIR_MASK);
else
pud = alloc_low_page(&map, &pud_phys);
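
A hypothetical caller for the early_ioremap()/early_iounmap() pair added above, only to illustrate the intended pattern (the function name and destination buffer are made up): map a firmware table by physical address before zap_low_mappings(), copy what is needed, then drop the temporary alias.

#include <linux/string.h>

static char table_copy[64] __initdata;

static void __init example_copy_table(unsigned long phys, unsigned long len)
{
	char *p = early_ioremap(phys, len);

	if (!p)
		return;				/* larger than the temp mapping allows */
	memcpy(table_copy, p, min(len, sizeof(table_copy)));
	early_iounmap(p, len);			/* avoid leaving a virtual alias around */
}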
diff --git a/arch/x86_64/mm/k8topology.c b/arch/x86_64/mm/k8topology.c
index dd60e71fdba6..7c45c2d2b8b2 100644
--- a/arch/x86_64/mm/k8topology.c
+++ b/arch/x86_64/mm/k8topology.c
@@ -43,7 +43,7 @@ static __init int find_northbridge(void)
int __init k8_scan_nodes(unsigned long start, unsigned long end)
{
unsigned long prevbase;
- struct node nodes[8];
+ struct bootnode nodes[8];
int nodeid, i, nb;
unsigned char nodeids[8];
int found = 0;
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 22e51beee8d3..63c72641b737 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -25,8 +25,7 @@
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
bootmem_data_t plat_node_bdata[MAX_NUMNODES];
-int memnode_shift;
-u8 memnodemap[NODEMAPSIZE];
+struct memnode memnode;
unsigned char cpu_to_node[NR_CPUS] __read_mostly = {
[0 ... NR_CPUS-1] = NUMA_NO_NODE
@@ -47,7 +46,7 @@ int numa_off __initdata;
* -1 if node overlap or lost ram (shift too big)
*/
static int __init
-populate_memnodemap(const struct node *nodes, int numnodes, int shift)
+populate_memnodemap(const struct bootnode *nodes, int numnodes, int shift)
{
int i;
int res = -1;
@@ -74,7 +73,7 @@ populate_memnodemap(const struct node *nodes, int numnodes, int shift)
return res;
}
-int __init compute_hash_shift(struct node *nodes, int numnodes)
+int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
{
int shift = 20;
@@ -149,7 +148,7 @@ void __init setup_node_bootmem(int nodeid, unsigned long start, unsigned long en
/* Initialize final allocator for a zone */
void __init setup_node_zones(int nodeid)
{
- unsigned long start_pfn, end_pfn;
+ unsigned long start_pfn, end_pfn, memmapsize, limit;
unsigned long zones[MAX_NR_ZONES];
unsigned long holes[MAX_NR_ZONES];
@@ -159,6 +158,16 @@ void __init setup_node_zones(int nodeid)
Dprintk(KERN_INFO "Setting up node %d %lx-%lx\n",
nodeid, start_pfn, end_pfn);
+ /* Try to allocate mem_map at end to not fill up precious <4GB
+ memory. */
+ memmapsize = sizeof(struct page) * (end_pfn-start_pfn);
+ limit = end_pfn << PAGE_SHIFT;
+ NODE_DATA(nodeid)->node_mem_map =
+ __alloc_bootmem_core(NODE_DATA(nodeid)->bdata,
+ memmapsize, SMP_CACHE_BYTES,
+ round_down(limit - memmapsize, PAGE_SIZE),
+ limit);
+
size_zones(zones, holes, start_pfn, end_pfn);
free_area_init_node(nodeid, NODE_DATA(nodeid), zones,
start_pfn, holes);
@@ -191,7 +200,7 @@ int numa_fake __initdata = 0;
static int numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
{
int i;
- struct node nodes[MAX_NUMNODES];
+ struct bootnode nodes[MAX_NUMNODES];
unsigned long sz = ((end_pfn - start_pfn)<<PAGE_SHIFT) / numa_fake;
/* Kludge needed for the hash function */
@@ -357,8 +366,7 @@ void __init init_cpu_to_node(void)
EXPORT_SYMBOL(cpu_to_node);
EXPORT_SYMBOL(node_to_cpumask);
-EXPORT_SYMBOL(memnode_shift);
-EXPORT_SYMBOL(memnodemap);
+EXPORT_SYMBOL(memnode);
EXPORT_SYMBOL(node_data);
#ifdef CONFIG_DISCONTIGMEM
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c
index 482c25767369..2eb879590dc4 100644
--- a/arch/x86_64/mm/srat.c
+++ b/arch/x86_64/mm/srat.c
@@ -23,7 +23,7 @@ static struct acpi_table_slit *acpi_slit;
static nodemask_t nodes_parsed __initdata;
static nodemask_t nodes_found __initdata;
-static struct node nodes[MAX_NUMNODES] __initdata;
+static struct bootnode nodes[MAX_NUMNODES] __initdata;
static u8 pxm2node[256] = { [0 ... 255] = 0xff };
/* Too small nodes confuse the VM badly. Usually they result
@@ -57,7 +57,7 @@ static __init int conflicting_nodes(unsigned long start, unsigned long end)
{
int i;
for_each_node_mask(i, nodes_parsed) {
- struct node *nd = &nodes[i];
+ struct bootnode *nd = &nodes[i];
if (nd->start == nd->end)
continue;
if (nd->end > start && nd->start < end)
@@ -70,7 +70,7 @@ static __init int conflicting_nodes(unsigned long start, unsigned long end)
static __init void cutoff_node(int i, unsigned long start, unsigned long end)
{
- struct node *nd = &nodes[i];
+ struct bootnode *nd = &nodes[i];
if (nd->start < start) {
nd->start = start;
if (nd->end < nd->start)
@@ -159,7 +159,7 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
void __init
acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
{
- struct node *nd;
+ struct bootnode *nd;
unsigned long start, end;
int node, pxm;
int i;
diff --git a/arch/x86_64/pci/mmconfig.c b/arch/x86_64/pci/mmconfig.c
index 18f371fe37f8..e616500207e4 100644
--- a/arch/x86_64/pci/mmconfig.c
+++ b/arch/x86_64/pci/mmconfig.c
@@ -55,7 +55,7 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
{
char __iomem *addr;
- if (seg == 0 && bus == 0 && test_bit(PCI_SLOT(devfn), &fallback_slots))
+ if (seg == 0 && bus == 0 && test_bit(PCI_SLOT(devfn), fallback_slots))
return NULL;
addr = get_virt(seg, bus);
if (!addr)
@@ -143,29 +143,29 @@ static __init void unreachable_devices(void)
continue;
addr = pci_dev_base(0, 0, PCI_DEVFN(i, 0));
if (addr == NULL|| readl(addr) != val1) {
- set_bit(i, &fallback_slots);
+ set_bit(i, fallback_slots);
}
}
}
-static int __init pci_mmcfg_init(void)
+void __init pci_mmcfg_init(void)
{
int i;
if ((pci_probe & PCI_PROBE_MMCONF) == 0)
- return 0;
+ return;
acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
if ((pci_mmcfg_config_num == 0) ||
(pci_mmcfg_config == NULL) ||
(pci_mmcfg_config[0].base_address == 0))
- return 0;
+ return;
/* RED-PEN i386 doesn't do _nocache right now */
pci_mmcfg_virt = kmalloc(sizeof(*pci_mmcfg_virt) * pci_mmcfg_config_num, GFP_KERNEL);
if (pci_mmcfg_virt == NULL) {
printk("PCI: Can not allocate memory for mmconfig structures\n");
- return 0;
+ return;
}
for (i = 0; i < pci_mmcfg_config_num; ++i) {
pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
@@ -173,7 +173,7 @@ static int __init pci_mmcfg_init(void)
if (!pci_mmcfg_virt[i].virt) {
printk("PCI: Cannot map mmconfig aperture for segment %d\n",
pci_mmcfg_config[i].pci_segment_group_number);
- return 0;
+ return;
}
printk(KERN_INFO "PCI: Using MMCONFIG at %x\n", pci_mmcfg_config[i].base_address);
}
@@ -182,8 +182,4 @@ static int __init pci_mmcfg_init(void)
raw_pci_ops = &pci_mmcfg;
pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
-
- return 0;
}
-
-arch_initcall(pci_mmcfg_init);
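
The fallback_slots hunks above drop the address-of operator, which implies the variable is now a real bitmap (presumably declared with DECLARE_BITMAP() elsewhere in the patch). A minimal sketch of that idiom, with hypothetical names:

#include <linux/types.h>
#include <linux/bitops.h>

/* Hypothetical stand-in for fallback_slots: one bit per PCI slot 0..31. */
static DECLARE_BITMAP(example_slots, 32);

static void mark_slot_unreachable(int slot)
{
	set_bit(slot, example_slots);		/* bitmap ops take the array itself */
}

static int slot_is_unreachable(int slot)
{
	return test_bit(slot, example_slots);
}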