Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/Kconfig                                   |  1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.c             | 21
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/p4-clockmod.c          | 31
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c          |  6
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.h          |  2
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c   | 10
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-lib.c        |  1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-smi.c        |  2
-rw-r--r--  arch/i386/kernel/i8253.c                            |  2
-rw-r--r--  arch/i386/kernel/io_apic.c                          |  4
-rw-r--r--  arch/i386/mach-voyager/setup.c                      |  8
-rw-r--r--  arch/i386/mach-voyager/voyager_cat.c                |  4
-rw-r--r--  arch/i386/mach-voyager/voyager_smp.c                | 97
-rw-r--r--  arch/i386/mach-voyager/voyager_thread.c             | 69
-rw-r--r--  arch/i386/pci/fixup.c                               |  2
-rw-r--r--  arch/i386/pci/i386.c                                |  4
16 files changed, 124 insertions(+), 140 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index c6f8d6856c4d..a9af760c7e5f 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -1057,6 +1057,7 @@ config PCI
bool "PCI support" if !X86_VISWS
depends on !X86_VOYAGER
default y if X86_VISWS
+ select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
help
Find out whether you have a PCI motherboard. PCI is the name of a
bus system, i.e. the way the CPU talks to the other stuff inside
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index 2b030d6ccbf7..a3df9c039bd4 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -590,20 +590,23 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
static int enable_arbiter_disable(void)
{
struct pci_dev *dev;
+ int status;
int reg;
u8 pci_cmd;
+ status = 1;
/* Find PLE133 host bridge */
reg = 0x78;
- dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0, NULL);
+ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0,
+ NULL);
/* Find CLE266 host bridge */
if (dev == NULL) {
reg = 0x76;
- dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_862X_0, NULL);
+ dev = pci_get_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_862X_0, NULL);
/* Find CN400 V-Link host bridge */
if (dev == NULL)
- dev = pci_find_device(PCI_VENDOR_ID_VIA, 0x7259, NULL);
-
+ dev = pci_get_device(PCI_VENDOR_ID_VIA, 0x7259, NULL);
}
if (dev != NULL) {
/* Enable access to port 0x22 */
@@ -615,10 +618,11 @@ static int enable_arbiter_disable(void)
if (!(pci_cmd & 1<<7)) {
printk(KERN_ERR PFX
"Can't enable access to port 0x22.\n");
- return 0;
+ status = 0;
}
}
- return 1;
+ pci_dev_put(dev);
+ return status;
}
return 0;
}
@@ -629,7 +633,7 @@ static int longhaul_setup_vt8235(void)
u8 pci_cmd;
/* Find VT8235 southbridge */
- dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL);
+ dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL);
if (dev != NULL) {
/* Set transition time to max */
pci_read_config_byte(dev, 0xec, &pci_cmd);
@@ -641,6 +645,7 @@ static int longhaul_setup_vt8235(void)
pci_read_config_byte(dev, 0xe5, &pci_cmd);
pci_cmd |= 1 << 7;
pci_write_config_byte(dev, 0xe5, pci_cmd);
+ pci_dev_put(dev);
return 1;
}
return 0;
@@ -678,7 +683,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
sizeof(samuel2_eblcr));
break;
case 1 ... 15:
- longhaul_version = TYPE_LONGHAUL_V2;
+ longhaul_version = TYPE_LONGHAUL_V1;
if (c->x86_mask < 8) {
cpu_model = CPU_SAMUEL2;
cpuname = "C3 'Samuel 2' [C5B]";
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index 4786fedca6eb..4c76b511e194 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -27,7 +27,6 @@
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
-#include <linux/sched.h> /* current / set_cpus_allowed() */
#include <asm/processor.h>
#include <asm/msr.h>
@@ -62,7 +61,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
if (!cpu_online(cpu) || (newstate > DC_DISABLE) || (newstate == DC_RESV))
return -EINVAL;
- rdmsr(MSR_IA32_THERM_STATUS, l, h);
+ rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
if (l & 0x01)
dprintk("CPU#%d currently thermal throttled\n", cpu);
@@ -70,10 +69,10 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT))
newstate = DC_38PT;
- rdmsr(MSR_IA32_THERM_CONTROL, l, h);
+ rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
if (newstate == DC_DISABLE) {
dprintk("CPU#%d disabling modulation\n", cpu);
- wrmsr(MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
+ wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
} else {
dprintk("CPU#%d setting duty cycle to %d%%\n",
cpu, ((125 * newstate) / 10));
@@ -84,7 +83,7 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
*/
l = (l & ~14);
l = l | (1<<4) | ((newstate & 0x7)<<1);
- wrmsr(MSR_IA32_THERM_CONTROL, l, h);
+ wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
}
return 0;
@@ -111,7 +110,6 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
{
unsigned int newstate = DC_RESV;
struct cpufreq_freqs freqs;
- cpumask_t cpus_allowed;
int i;
if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], target_freq, relation, &newstate))
@@ -132,17 +130,8 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
* Developer's Manual, Volume 3
*/
- cpus_allowed = current->cpus_allowed;
-
- for_each_cpu_mask(i, policy->cpus) {
- cpumask_t this_cpu = cpumask_of_cpu(i);
-
- set_cpus_allowed(current, this_cpu);
- BUG_ON(smp_processor_id() != i);
-
+ for_each_cpu_mask(i, policy->cpus)
cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
- }
- set_cpus_allowed(current, cpus_allowed);
/* notifiers */
for_each_cpu_mask(i, policy->cpus) {
@@ -256,17 +245,9 @@ static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
static unsigned int cpufreq_p4_get(unsigned int cpu)
{
- cpumask_t cpus_allowed;
u32 l, h;
- cpus_allowed = current->cpus_allowed;
-
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
- BUG_ON(smp_processor_id() != cpu);
-
- rdmsr(MSR_IA32_THERM_CONTROL, l, h);
-
- set_cpus_allowed(current, cpus_allowed);
+ rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
if (l & 0x10) {
l = l >> 1;
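The p4-clockmod rework above replaces the set_cpus_allowed() affinity dance with rdmsr_on_cpu()/wrmsr_on_cpu(), which run the MSR access on the target CPU on the caller's behalf. A minimal usage sketch; the helper name and the specific bit manipulation are illustrative only:

#include <asm/msr.h>

/* Sketch: access another CPU's MSR without rebinding the current
 * task.  The *_on_cpu helpers arrange for the read/write to execute
 * on 'cpu'. */
static void example_enable_modulation(unsigned int cpu)
{
	u32 l, h;

	rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
	l |= 1 << 4;		/* illustrative: set the modulation enable bit */
	wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
}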
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index fe3b67005ebb..7cf3d207b6b3 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -661,7 +661,8 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst,
dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
data->powernow_table = powernow_table;
- print_basics(data);
+ if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
+ print_basics(data);
for (j = 0; j < data->numps; j++)
if ((pst[j].fid==data->currfid) && (pst[j].vid==data->currvid))
@@ -814,7 +815,8 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
/* fill in data */
data->numps = data->acpi_data.state_count;
- print_basics(data);
+ if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
+ print_basics(data);
powernow_k8_acpi_pst_values(data, 0);
/* notify BIOS that we exist */
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
index 0fb2a3001ba5..95be5013c984 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
@@ -215,8 +215,10 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
+#ifdef CONFIG_X86_POWERNOW_K8_ACPI
static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
+#endif
#ifdef CONFIG_SMP
static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index f43b987f952b..35489fd68852 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -720,6 +720,7 @@ static int centrino_target (struct cpufreq_policy *policy,
cpu_set(j, set_mask);
set_cpus_allowed(current, set_mask);
+ preempt_disable();
if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
dprintk("couldn't limit to CPUs in this domain\n");
retval = -EAGAIN;
@@ -727,6 +728,7 @@ static int centrino_target (struct cpufreq_policy *policy,
/* We haven't started the transition yet. */
goto migrate_end;
}
+ preempt_enable();
break;
}
@@ -761,10 +763,13 @@ static int centrino_target (struct cpufreq_policy *policy,
}
wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
- if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
+ preempt_enable();
break;
+ }
cpu_set(j, covered_cpus);
+ preempt_enable();
}
for_each_cpu_mask(k, online_policy_cpus) {
@@ -796,8 +801,11 @@ static int centrino_target (struct cpufreq_policy *policy,
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
}
+ set_cpus_allowed(current, saved_mask);
+ return 0;
migrate_end:
+ preempt_enable();
set_cpus_allowed(current, saved_mask);
return 0;
}
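The speedstep-centrino hunks above bracket the smp_processor_id() test with preempt_disable()/preempt_enable(), since the result is only stable while preemption is off. The general shape, as a sketch with an illustrative helper name:

#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/cpumask.h>

/* Sketch: read smp_processor_id() and act on it inside a single
 * non-preemptible region, so the task cannot migrate in between. */
static int example_running_in_mask(cpumask_t allowed)
{
	int ok;

	preempt_disable();
	ok = cpu_isset(smp_processor_id(), allowed);
	preempt_enable();

	return ok;
}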
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
index d59277c00911..b1acc8ce3167 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
@@ -13,7 +13,6 @@
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
-#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/msr.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
index ff0d89806114..e1c509aa3054 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
@@ -17,10 +17,10 @@
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
-#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/ist.h>
+#include <asm/io.h>
#include "speedstep-lib.h"
diff --git a/arch/i386/kernel/i8253.c b/arch/i386/kernel/i8253.c
index 10cef5ca8a5b..f8a3c4054c70 100644
--- a/arch/i386/kernel/i8253.c
+++ b/arch/i386/kernel/i8253.c
@@ -110,7 +110,7 @@ void __init setup_pit_timer(void)
* Start pit with the boot cpu mask and make it global after the
* IO_APIC has been initialized.
*/
- pit_clockevent.cpumask = cpumask_of_cpu(0);
+ pit_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, 32);
pit_clockevent.max_delta_ns =
clockevent_delta2ns(0x7FFF, &pit_clockevent);
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index f23a17b3b8cf..1b623cda3a64 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -2579,19 +2579,19 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
if (irq < 0)
return irq;
- set_irq_msi(irq, desc);
ret = msi_compose_msg(dev, irq, &msg);
if (ret < 0) {
destroy_irq(irq);
return ret;
}
+ set_irq_msi(irq, desc);
write_msi_msg(irq, &msg);
set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
"edge");
- return irq;
+ return 0;
}
void arch_teardown_msi_irq(unsigned int irq)
diff --git a/arch/i386/mach-voyager/setup.c b/arch/i386/mach-voyager/setup.c
index cfa16c151c8f..447bb105cf58 100644
--- a/arch/i386/mach-voyager/setup.c
+++ b/arch/i386/mach-voyager/setup.c
@@ -40,10 +40,16 @@ void __init trap_init_hook(void)
{
}
-static struct irqaction irq0 = { timer_interrupt, IRQF_DISABLED, CPU_MASK_NONE, "timer", NULL, NULL};
+static struct irqaction irq0 = {
+ .handler = timer_interrupt,
+ .flags = IRQF_DISABLED | IRQF_NOBALANCING,
+ .mask = CPU_MASK_NONE,
+ .name = "timer"
+};
void __init time_init_hook(void)
{
+ irq0.mask = cpumask_of_cpu(safe_smp_processor_id());
setup_irq(0, &irq0);
}
diff --git a/arch/i386/mach-voyager/voyager_cat.c b/arch/i386/mach-voyager/voyager_cat.c
index 943a9473b138..26a2d4c54b68 100644
--- a/arch/i386/mach-voyager/voyager_cat.c
+++ b/arch/i386/mach-voyager/voyager_cat.c
@@ -1111,7 +1111,7 @@ voyager_cat_do_common_interrupt(void)
printk(KERN_ERR "Voyager front panel switch turned off\n");
voyager_status.switch_off = 1;
voyager_status.request_from_kernel = 1;
- up(&kvoyagerd_sem);
+ wake_up_process(voyager_thread);
}
/* Tell the hardware we're taking care of the
* shutdown, otherwise it will power the box off
@@ -1157,7 +1157,7 @@ voyager_cat_do_common_interrupt(void)
outb(VOYAGER_CAT_END, CAT_CMD);
voyager_status.power_fail = 1;
voyager_status.request_from_kernel = 1;
- up(&kvoyagerd_sem);
+ wake_up_process(voyager_thread);
}
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index b9ce33c0c202..1a5e448a29c7 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -536,15 +536,6 @@ do_boot_cpu(__u8 cpu)
& ~( voyager_extended_vic_processors
& voyager_allowed_boot_processors);
- /* For the 486, we can't use the 4Mb page table trick, so
- * must map a region of memory */
-#ifdef CONFIG_M486
- int i;
- unsigned long *page_table_copies = (unsigned long *)
- __get_free_page(GFP_KERNEL);
-#endif
- pgd_t orig_swapper_pg_dir0;
-
/* This is an area in head.S which was used to set up the
* initial kernel stack. We need to alter this to give the
* booting CPU a new stack (taken from its idle process) */
@@ -573,6 +564,8 @@ do_boot_cpu(__u8 cpu)
hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
cpucount++;
+ alternatives_smp_switch(1);
+
idle = fork_idle(cpu);
if(IS_ERR(idle))
panic("failed fork for CPU%d", cpu);
@@ -587,24 +580,11 @@ do_boot_cpu(__u8 cpu)
VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
(unsigned long)hijack_source.val, hijack_source.idt.Segment,
hijack_source.idt.Offset, stack_start.esp));
- /* set the original swapper_pg_dir[0] to map 0 to 4Mb transparently
- * (so that the booting CPU can find start_32 */
- orig_swapper_pg_dir0 = swapper_pg_dir[0];
-#ifdef CONFIG_M486
- if(page_table_copies == NULL)
- panic("No free memory for 486 page tables\n");
- for(i = 0; i < PAGE_SIZE/sizeof(unsigned long); i++)
- page_table_copies[i] = (i * PAGE_SIZE)
- | _PAGE_RW | _PAGE_USER | _PAGE_PRESENT;
-
- ((unsigned long *)swapper_pg_dir)[0] =
- ((virt_to_phys(page_table_copies)) & PAGE_MASK)
- | _PAGE_RW | _PAGE_USER | _PAGE_PRESENT;
-#else
- ((unsigned long *)swapper_pg_dir)[0] =
- (virt_to_phys(pg0) & PAGE_MASK)
- | _PAGE_RW | _PAGE_USER | _PAGE_PRESENT;
-#endif
+
+ /* init lowmem identity mapping */
+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
+ min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
+ flush_tlb_all();
if(quad_boot) {
printk("CPU %d: non extended Quad boot\n", cpu);
@@ -647,11 +627,7 @@ do_boot_cpu(__u8 cpu)
udelay(100);
}
/* reset the page table */
- swapper_pg_dir[0] = orig_swapper_pg_dir0;
- local_flush_tlb();
-#ifdef CONFIG_M486
- free_page((unsigned long)page_table_copies);
-#endif
+ zap_low_mappings();
if (cpu_booted_map) {
VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
@@ -1068,20 +1044,11 @@ smp_call_function_interrupt(void)
}
}
-/* Call this function on all CPUs using the function_interrupt above
- <func> The function to run. This must be fast and non-blocking.
- <info> An arbitrary pointer to pass to the function.
- <retry> If true, keep retrying until ready.
- <wait> If true, wait until function has completed on other CPUs.
- [RETURNS] 0 on success, else a negative status code. Does not return until
- remote CPUs are nearly ready to execute <<func>> or are or have executed.
-*/
-int
-smp_call_function (void (*func) (void *info), void *info, int retry,
- int wait)
+static int
+__smp_call_function_mask (void (*func) (void *info), void *info, int retry,
+ int wait, __u32 mask)
{
struct call_data_struct data;
- __u32 mask = cpus_addr(cpu_online_map)[0];
mask &= ~(1<<smp_processor_id());
@@ -1102,7 +1069,7 @@ smp_call_function (void (*func) (void *info), void *info, int retry,
call_data = &data;
wmb();
/* Send a message to all other CPUs and wait for them to respond */
- send_CPI_allbutself(VIC_CALL_FUNCTION_CPI);
+ send_CPI(mask, VIC_CALL_FUNCTION_CPI);
/* Wait for response */
while (data.started)
@@ -1116,8 +1083,48 @@ smp_call_function (void (*func) (void *info), void *info, int retry,
return 0;
}
+
+/* Call this function on all CPUs using the function_interrupt above
+ <func> The function to run. This must be fast and non-blocking.
+ <info> An arbitrary pointer to pass to the function.
+ <retry> If true, keep retrying until ready.
+ <wait> If true, wait until function has completed on other CPUs.
+ [RETURNS] 0 on success, else a negative status code. Does not return until
+ remote CPUs are nearly ready to execute <<func>> or are or have executed.
+*/
+int
+smp_call_function(void (*func) (void *info), void *info, int retry,
+ int wait)
+{
+ __u32 mask = cpus_addr(cpu_online_map)[0];
+
+ return __smp_call_function_mask(func, info, retry, wait, mask);
+}
EXPORT_SYMBOL(smp_call_function);
+/*
+ * smp_call_function_single - Run a function on another CPU
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @nonatomic: Currently unused.
+ * @wait: If true, wait until function has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * Does not return until the remote CPU is nearly ready to execute <func>
+ * or is or has executed.
+ */
+
+int
+smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+ int nonatomic, int wait)
+{
+ __u32 mask = 1 << cpu;
+
+ return __smp_call_function_mask(func, info, nonatomic, wait, mask);
+}
+EXPORT_SYMBOL(smp_call_function_single);
+
/* Sorry about the name. In an APIC based system, the APICs
* themselves are programmed to send a timer interrupt. This is used
* by linux to reschedule the processor. Voyager doesn't have this,
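The voyager smp_call_function_single() added above follows the generic kernel prototype of the time (cpu, func, info, nonatomic, wait). A usage sketch with made-up callback and wrapper names:

#include <linux/smp.h>

/* Runs on the target CPU in interrupt context: must be fast and
 * must not sleep. */
static void example_remote_fn(void *info)
{
	*(int *)info = smp_processor_id();
}

/* Sketch: ask 'cpu' to report its processor id.  wait=1 means we do
 * not return until the remote function has completed. */
static int example_query_cpu(int cpu)
{
	int id = -1;

	smp_call_function_single(cpu, example_remote_fn, &id, 0, 1);
	return id;
}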
diff --git a/arch/i386/mach-voyager/voyager_thread.c b/arch/i386/mach-voyager/voyager_thread.c
index f39887359e8e..fdc1d926fb2a 100644
--- a/arch/i386/mach-voyager/voyager_thread.c
+++ b/arch/i386/mach-voyager/voyager_thread.c
@@ -24,33 +24,16 @@
#include <linux/kmod.h>
#include <linux/completion.h>
#include <linux/sched.h>
+#include <linux/kthread.h>
#include <asm/desc.h>
#include <asm/voyager.h>
#include <asm/vic.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
-#define THREAD_NAME "kvoyagerd"
-/* external variables */
-int kvoyagerd_running = 0;
-DECLARE_MUTEX_LOCKED(kvoyagerd_sem);
-
-static int thread(void *);
-
-static __u8 set_timeout = 0;
-
-/* Start the machine monitor thread. Return 1 if OK, 0 if fail */
-static int __init
-voyager_thread_start(void)
-{
- if(kernel_thread(thread, NULL, CLONE_KERNEL) < 0) {
- /* This is serious, but not fatal */
- printk(KERN_ERR "Voyager: Failed to create system monitor thread!!!\n");
- return 1;
- }
- return 0;
-}
+struct task_struct *voyager_thread;
+static __u8 set_timeout;
static int
execute(const char *string)
@@ -110,31 +93,15 @@ check_continuing_condition(void)
}
}
-static void
-wakeup(unsigned long unused)
-{
- up(&kvoyagerd_sem);
-}
-
static int
thread(void *unused)
{
- struct timer_list wakeup_timer;
-
- kvoyagerd_running = 1;
-
- daemonize(THREAD_NAME);
-
- set_timeout = 0;
-
- init_timer(&wakeup_timer);
-
- sigfillset(&current->blocked);
-
printk(KERN_NOTICE "Voyager starting monitor thread\n");
- for(;;) {
- down_interruptible(&kvoyagerd_sem);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(set_timeout ? HZ : MAX_SCHEDULE_TIMEOUT);
+
VDEBUG(("Voyager Daemon awoken\n"));
if(voyager_status.request_from_kernel == 0) {
/* probably awoken from timeout */
@@ -143,20 +110,26 @@ thread(void *unused)
check_from_kernel();
voyager_status.request_from_kernel = 0;
}
- if(set_timeout) {
- del_timer(&wakeup_timer);
- wakeup_timer.expires = HZ + jiffies;
- wakeup_timer.function = wakeup;
- add_timer(&wakeup_timer);
- }
}
}
+static int __init
+voyager_thread_start(void)
+{
+ voyager_thread = kthread_run(thread, NULL, "kvoyagerd");
+ if (IS_ERR(voyager_thread)) {
+ printk(KERN_ERR "Voyager: Failed to create system monitor thread.\n");
+ return PTR_ERR(voyager_thread);
+ }
+ return 0;
+}
+
+
static void __exit
voyager_thread_stop(void)
{
- /* FIXME: do nothing at the moment */
+ kthread_stop(voyager_thread);
}
module_init(voyager_thread_start);
-//module_exit(voyager_thread_stop);
+module_exit(voyager_thread_stop);
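The voyager_thread conversion above is the standard kthread life cycle: kthread_run() to start the daemon, wake_up_process() from the interrupt path, kthread_stop() on module exit. A generic sketch with illustrative names; note the canonical loop also polls kthread_should_stop():

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *example_task;

static int example_thread(void *unused)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		/* woken by wake_up_process(example_task) or kthread_stop() */
	}
	return 0;
}

static int __init example_init(void)
{
	example_task = kthread_run(example_thread, NULL, "kexampled");
	if (IS_ERR(example_task))
		return PTR_ERR(example_task);
	return 0;
}

static void __exit example_exit(void)
{
	kthread_stop(example_task);	/* wakes the thread and waits for it to exit */
}

module_init(example_init);
module_exit(example_exit);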
diff --git a/arch/i386/pci/fixup.c b/arch/i386/pci/fixup.c
index 8053b17ab647..b62eafb997bc 100644
--- a/arch/i386/pci/fixup.c
+++ b/arch/i386/pci/fixup.c
@@ -354,7 +354,7 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
printk(KERN_DEBUG "Boot video device is %s\n", pci_name(pdev));
}
}
-DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video);
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video);
/*
* Some Toshiba laptops need extra code to enable their TI TSB43AB22/A.
diff --git a/arch/i386/pci/i386.c b/arch/i386/pci/i386.c
index 43005f044424..bcd2f94b732c 100644
--- a/arch/i386/pci/i386.c
+++ b/arch/i386/pci/i386.c
@@ -246,8 +246,8 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask)
continue;
if (!r->start && r->end) {
printk(KERN_ERR "PCI: Device %s not available "
- "because of resource collisions\n",
- pci_name(dev));
+ "because of resource %d collisions\n",
+ pci_name(dev), idx);
return -EINVAL;
}
if (r->flags & IORESOURCE_IO)