Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile | 14
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 25
-rw-r--r--  arch/mips/kernel/binfmt_elfn32.c | 4
-rw-r--r--  arch/mips/kernel/binfmt_elfo32.c | 35
-rw-r--r--  arch/mips/kernel/branch.c | 29
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 256
-rw-r--r--  arch/mips/kernel/dma-no-isa.c | 28
-rw-r--r--  arch/mips/kernel/entry.S | 54
-rw-r--r--  arch/mips/kernel/gdb-low.S | 5
-rw-r--r--  arch/mips/kernel/gdb-stub.c | 23
-rw-r--r--  arch/mips/kernel/genex.S | 44
-rw-r--r--  arch/mips/kernel/genrtc.c | 64
-rw-r--r--  arch/mips/kernel/head.S | 70
-rw-r--r--  arch/mips/kernel/i8259.c | 21
-rw-r--r--  arch/mips/kernel/ioctl32.c | 6
-rw-r--r--  arch/mips/kernel/irixelf.c | 254
-rw-r--r--  arch/mips/kernel/irixinv.c | 7
-rw-r--r--  arch/mips/kernel/irixioctl.c | 63
-rw-r--r--  arch/mips/kernel/irixsig.c | 408
-rw-r--r--  arch/mips/kernel/irq-msc01.c | 38
-rw-r--r--  arch/mips/kernel/irq-mv6434x.c | 15
-rw-r--r--  arch/mips/kernel/irq-rm7000.c | 14
-rw-r--r--  arch/mips/kernel/irq-rm9000.c | 28
-rw-r--r--  arch/mips/kernel/irq_cpu.c | 91
-rw-r--r--  arch/mips/kernel/linux32.c | 164
-rw-r--r--  arch/mips/kernel/module-elf32.c | 250
-rw-r--r--  arch/mips/kernel/module-elf64.c | 274
-rw-r--r--  arch/mips/kernel/module.c | 336
-rw-r--r--  arch/mips/kernel/proc.c | 135
-rw-r--r--  arch/mips/kernel/process.c | 213
-rw-r--r--  arch/mips/kernel/ptrace.c | 244
-rw-r--r--  arch/mips/kernel/ptrace32.c | 150
-rw-r--r--  arch/mips/kernel/r4k_fpu.S | 5
-rw-r--r--  arch/mips/kernel/rtlx.c | 341
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 13
-rw-r--r--  arch/mips/kernel/scall64-64.S | 4
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 32
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 14
-rw-r--r--  arch/mips/kernel/semaphore.c | 12
-rw-r--r--  arch/mips/kernel/setup.c | 46
-rw-r--r--  arch/mips/kernel/signal-common.h | 90
-rw-r--r--  arch/mips/kernel/signal.c | 143
-rw-r--r--  arch/mips/kernel/signal32.c | 114
-rw-r--r--  arch/mips/kernel/signal_n32.c | 37
-rw-r--r--  arch/mips/kernel/smp.c | 51
-rw-r--r--  arch/mips/kernel/smp_mt.c | 366
-rw-r--r--  arch/mips/kernel/syscall.c | 34
-rw-r--r--  arch/mips/kernel/sysirix.c | 539
-rw-r--r--  arch/mips/kernel/time.c | 16
-rw-r--r--  arch/mips/kernel/traps.c | 499
-rw-r--r--  arch/mips/kernel/unaligned.c | 10
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S | 13
-rw-r--r--  arch/mips/kernel/vpe.c | 1296
53 files changed, 4968 insertions, 2069 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index d3303584fbd1..72f2126ad19d 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -11,11 +11,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
irix5sys.o sysirix.o
-ifdef CONFIG_MODULES
-obj-y += mips_ksyms.o module.o
-obj-$(CONFIG_32BIT) += module-elf32.o
-obj-$(CONFIG_64BIT) += module-elf64.o
-endif
+obj-$(CONFIG_MODULES) += mips_ksyms.o module.o
obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
@@ -38,12 +34,18 @@ obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_MIPS_MT_SMP) += smp_mt.o
+
+obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
+obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
+
obj-$(CONFIG_NO_ISA) += dma-no-isa.o
obj-$(CONFIG_I8259) += i8259.o
obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o
obj-$(CONFIG_IRQ_MV64340) += irq-mv6434x.o
+obj-$(CONFIG_MIPS_BOARDS_GEN) += irq-msc01.o
obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o
@@ -57,8 +59,6 @@ obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_64BIT) += cpu-bugs64.o
-obj-$(CONFIG_GEN_RTC) += genrtc.o
-
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
CFLAGS_ioctl32.o += -Ifs/
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 2c11abb5a406..ca6b03c773be 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -95,6 +95,7 @@ void output_thread_info_defines(void)
offset("#define TI_PRE_COUNT ", struct thread_info, preempt_count);
offset("#define TI_ADDR_LIMIT ", struct thread_info, addr_limit);
offset("#define TI_RESTART_BLOCK ", struct thread_info, restart_block);
+ offset("#define TI_TP_VALUE ", struct thread_info, tp_value);
constant("#define _THREAD_SIZE_ORDER ", THREAD_SIZE_ORDER);
constant("#define _THREAD_SIZE ", THREAD_SIZE);
constant("#define _THREAD_MASK ", THREAD_MASK);
@@ -240,6 +241,7 @@ void output_mm_defines(void)
linefeed;
}
+#ifdef CONFIG_32BIT
void output_sc_defines(void)
{
text("/* Linux sigcontext offsets. */");
@@ -251,10 +253,29 @@ void output_sc_defines(void)
offset("#define SC_STATUS ", struct sigcontext, sc_status);
offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
offset("#define SC_FPC_EIR ", struct sigcontext, sc_fpc_eir);
- offset("#define SC_CAUSE ", struct sigcontext, sc_cause);
- offset("#define SC_BADVADDR ", struct sigcontext, sc_badvaddr);
+ offset("#define SC_HI1 ", struct sigcontext, sc_hi1);
+ offset("#define SC_LO1 ", struct sigcontext, sc_lo1);
+ offset("#define SC_HI2 ", struct sigcontext, sc_hi2);
+ offset("#define SC_LO2 ", struct sigcontext, sc_lo2);
+ offset("#define SC_HI3 ", struct sigcontext, sc_hi3);
+ offset("#define SC_LO3 ", struct sigcontext, sc_lo3);
linefeed;
}
+#endif
+
+#ifdef CONFIG_64BIT
+void output_sc_defines(void)
+{
+ text("/* Linux sigcontext offsets. */");
+ offset("#define SC_REGS ", struct sigcontext, sc_regs);
+ offset("#define SC_FPREGS ", struct sigcontext, sc_fpregs);
+ offset("#define SC_MDHI ", struct sigcontext, sc_hi);
+ offset("#define SC_MDLO ", struct sigcontext, sc_lo);
+ offset("#define SC_PC ", struct sigcontext, sc_pc);
+ offset("#define SC_FPC_CSR ", struct sigcontext, sc_fpc_csr);
+ linefeed;
+}
+#endif
#ifdef CONFIG_MIPS32_COMPAT
void output_sc32_defines(void)
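
The asm-offsets hunk above only adds new offset()/constant() lines; the underlying idea is the usual one of letting the C compiler compute structure offsets so assembly code can use them. A minimal userspace sketch of that idea (the struct and its fields below are made up for illustration, not the kernel's actual generator):

	#include <stddef.h>
	#include <stdio.h>

	/* Stand-in for struct thread_info; only the field needed here. */
	struct ti_example {
		unsigned long flags;
		unsigned long tp_value;	/* thread pointer, as in TI_TP_VALUE */
	};

	int main(void)
	{
		/* Emit a #define that assembly sources could then include. */
		printf("#define TI_TP_VALUE %zu\n",
		       offsetof(struct ti_example, tp_value));
		return 0;
	}
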
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 6b645fbb1ddc..d8e2674a1543 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -52,7 +52,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#include <asm/processor.h>
#include <linux/module.h>
-#include <linux/config.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
@@ -116,4 +115,7 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
#undef MODULE_DESCRIPTION
#undef MODULE_AUTHOR
+#undef TASK_SIZE
+#define TASK_SIZE TASK_SIZE32
+
#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index b4075e99c452..cec5f327e360 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -54,7 +54,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#include <asm/processor.h>
#include <linux/module.h>
-#include <linux/config.h>
#include <linux/elfcore.h>
#include <linux/compat.h>
@@ -98,7 +97,7 @@ struct elf_prpsinfo32
#define init_elf_binfmt init_elf32_binfmt
#define jiffies_to_timeval jiffies_to_compat_timeval
-static __inline__ void
+static inline void
jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
{
/*
@@ -113,21 +112,26 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
#undef ELF_CORE_COPY_REGS
#define ELF_CORE_COPY_REGS(_dest,_regs) elf32_core_copy_regs(_dest,_regs);
-void elf32_core_copy_regs(elf_gregset_t _dest, struct pt_regs *_regs)
+void elf32_core_copy_regs(elf_gregset_t grp, struct pt_regs *regs)
{
int i;
- memset(_dest, 0, sizeof(elf_gregset_t));
-
- /* XXXKW the 6 is from EF_REG0 in gdb/gdb/mips-linux-tdep.c, include/asm-mips/reg.h */
- for (i=6; i<38; i++)
- _dest[i] = (elf_greg_t) _regs->regs[i-6];
- _dest[i++] = (elf_greg_t) _regs->lo;
- _dest[i++] = (elf_greg_t) _regs->hi;
- _dest[i++] = (elf_greg_t) _regs->cp0_epc;
- _dest[i++] = (elf_greg_t) _regs->cp0_badvaddr;
- _dest[i++] = (elf_greg_t) _regs->cp0_status;
- _dest[i++] = (elf_greg_t) _regs->cp0_cause;
+ for (i = 0; i < EF_R0; i++)
+ grp[i] = 0;
+ grp[EF_R0] = 0;
+ for (i = 1; i <= 31; i++)
+ grp[EF_R0 + i] = (elf_greg_t) regs->regs[i];
+ grp[EF_R26] = 0;
+ grp[EF_R27] = 0;
+ grp[EF_LO] = (elf_greg_t) regs->lo;
+ grp[EF_HI] = (elf_greg_t) regs->hi;
+ grp[EF_CP0_EPC] = (elf_greg_t) regs->cp0_epc;
+ grp[EF_CP0_BADVADDR] = (elf_greg_t) regs->cp0_badvaddr;
+ grp[EF_CP0_STATUS] = (elf_greg_t) regs->cp0_status;
+ grp[EF_CP0_CAUSE] = (elf_greg_t) regs->cp0_cause;
+#ifdef EF_UNUSED0
+ grp[EF_UNUSED0] = 0;
+#endif
}
MODULE_DESCRIPTION("Binary format loader for compatibility with o32 Linux/MIPS binaries");
@@ -136,4 +140,7 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)");
#undef MODULE_DESCRIPTION
#undef MODULE_AUTHOR
+#undef TASK_SIZE
+#define TASK_SIZE TASK_SIZE32
+
#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 01117e977a7f..374de839558d 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -12,6 +12,7 @@
#include <asm/branch.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
+#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
@@ -21,7 +22,7 @@
*/
int __compute_return_epc(struct pt_regs *regs)
{
- unsigned int *addr, bit, fcr31;
+ unsigned int *addr, bit, fcr31, dspcontrol;
long epc;
union mips_instruction insn;
@@ -98,6 +99,18 @@ int __compute_return_epc(struct pt_regs *regs)
epc += 8;
regs->cp0_epc = epc;
break;
+ case bposge32_op:
+ if (!cpu_has_dsp)
+ goto sigill;
+
+ dspcontrol = rddsp(0x01);
+
+ if (dspcontrol >= 32) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
}
break;
@@ -161,10 +174,13 @@ int __compute_return_epc(struct pt_regs *regs)
* And now the FPA/cp1 branch instructions.
*/
case cop1_op:
- if (!cpu_has_fpu)
- fcr31 = current->thread.fpu.soft.fcr31;
- else
+ preempt_disable();
+ if (is_fpu_owner())
asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+ else
+ fcr31 = current->thread.fpu.hard.fcr31;
+ preempt_enable();
+
bit = (insn.i_format.rt >> 2);
bit += (bit != 0);
bit += 23;
@@ -196,4 +212,9 @@ unaligned:
printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS, current);
return -EFAULT;
+
+sigill:
+ printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
+ force_sig(SIGBUS, current);
+ return -EFAULT;
}
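
For reference, the new bposge32 case follows the standard MIPS branch arithmetic: the 16-bit immediate is a signed word offset taken relative to the delay-slot instruction, and a not-taken branch skips both the branch and its delay slot. A small stand-alone sketch of that arithmetic (illustrative only, not part of the patch):

	/* Taken branch: target = branch PC + 4 + (signed offset in words). */
	static unsigned long mips_branch_target(unsigned long epc, short simmediate)
	{
		return epc + 4 + ((long) simmediate << 2);
	}

	/* Not taken: fall through past the delay slot, as in "epc += 8" above. */
	static unsigned long mips_branch_fallthrough(unsigned long epc)
	{
		return epc + 8;
	}
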
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 7685f8baf3f0..a263fb7a3971 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -2,9 +2,9 @@
* Processor capabilities determination functions.
*
* Copyright (C) xxxx the Anonymous
- * Copyright (C) 2003 Maciej W. Rozycki
+ * Copyright (C) 2003, 2004 Maciej W. Rozycki
* Copyright (C) 1994 - 2003 Ralf Baechle
- * Copyright (C) 2001 MIPS Inc.
+ * Copyright (C) 2001, 2004 MIPS Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -17,7 +17,6 @@
#include <linux/ptrace.h>
#include <linux/stddef.h>
-#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
@@ -51,36 +50,48 @@ static void r4k_wait(void)
".set\tmips0");
}
-/*
- * The Au1xxx wait is available only if we run CONFIG_PM and
- * the timer setup found we had a 32KHz counter available.
- * There are still problems with functions that may call au1k_wait
- * directly, but that will be discovered pretty quickly.
- */
-extern void (*au1k_wait_ptr)(void);
+/* The Au1xxx wait is available only if using 32khz counter or
+ * external timer source, but specifically not CP0 Counter. */
+int allow_au1k_wait;
-void au1k_wait(void)
+static void au1k_wait(void)
{
-#ifdef CONFIG_PM
/* using the wait instruction makes CP0 counter unusable */
- __asm__(".set\tmips3\n\t"
+ __asm__(".set mips3\n\t"
+ "cache 0x14, 0(%0)\n\t"
+ "cache 0x14, 32(%0)\n\t"
+ "sync\n\t"
+ "nop\n\t"
"wait\n\t"
"nop\n\t"
"nop\n\t"
"nop\n\t"
"nop\n\t"
- ".set\tmips0");
-#else
- __asm__("nop\n\t"
- "nop");
-#endif
+ ".set mips0\n\t"
+ : : "r" (au1k_wait));
}
+static int __initdata nowait = 0;
+
+int __init wait_disable(char *s)
+{
+ nowait = 1;
+
+ return 1;
+}
+
+__setup("nowait", wait_disable);
+
static inline void check_wait(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
printk("Checking for 'wait' instruction... ");
+ if (nowait) {
+ printk (" disabled.\n");
+ return;
+ }
+
switch (c->cputype) {
case CPU_R3081:
case CPU_R3081E:
@@ -109,22 +120,22 @@ static inline void check_wait(void)
/* case CPU_20KC:*/
case CPU_24K:
case CPU_25KF:
+ case CPU_34K:
+ case CPU_PR4450:
cpu_wait = r4k_wait;
printk(" available.\n");
break;
-#ifdef CONFIG_PM
case CPU_AU1000:
case CPU_AU1100:
case CPU_AU1500:
- if (au1k_wait_ptr != NULL) {
- cpu_wait = au1k_wait_ptr;
+ case CPU_AU1550:
+ case CPU_AU1200:
+ if (allow_au1k_wait) {
+ cpu_wait = au1k_wait;
printk(" available.\n");
- }
- else {
+ } else
printk(" unavailable.\n");
- }
break;
-#endif
default:
printk(" unavailable.\n");
break;
@@ -180,7 +191,7 @@ static inline int __cpu_has_fpu(void)
return ((cpu_get_fpu_id() & 0xff00) != FPIR_IMP_NONE);
}
-#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4KTLB \
+#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
| MIPS_CPU_COUNTER)
static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
@@ -189,7 +200,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
case PRID_IMP_R2000:
c->cputype = CPU_R2000;
c->isa_level = MIPS_CPU_ISA_I;
- c->options = MIPS_CPU_TLB | MIPS_CPU_NOFPUEX;
+ c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
+ MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
c->options |= MIPS_CPU_FPU;
c->tlbsize = 64;
@@ -203,7 +215,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
else
c->cputype = CPU_R3000;
c->isa_level = MIPS_CPU_ISA_I;
- c->options = MIPS_CPU_TLB | MIPS_CPU_NOFPUEX;
+ c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
+ MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
c->options |= MIPS_CPU_FPU;
c->tlbsize = 64;
@@ -266,7 +279,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
case PRID_IMP_R4600:
c->cputype = CPU_R4600;
c->isa_level = MIPS_CPU_ISA_III;
- c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
#if 0
@@ -285,7 +299,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
#endif
case PRID_IMP_TX39:
c->isa_level = MIPS_CPU_ISA_I;
- c->options = MIPS_CPU_TLB;
+ c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE;
if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) {
c->cputype = CPU_TX3927;
@@ -421,74 +435,147 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
}
}
-static inline void decode_config1(struct cpuinfo_mips *c)
+static inline unsigned int decode_config0(struct cpuinfo_mips *c)
{
- unsigned long config0 = read_c0_config();
- unsigned long config1;
+ unsigned int config0;
+ int isa;
+
+ config0 = read_c0_config();
- if ((config0 & (1 << 31)) == 0)
- return; /* actually wort a panic() */
+ if (((config0 & MIPS_CONF_MT) >> 7) == 1)
+ c->options |= MIPS_CPU_TLB;
+ isa = (config0 & MIPS_CONF_AT) >> 13;
+ switch (isa) {
+ case 0:
+ c->isa_level = MIPS_CPU_ISA_M32;
+ break;
+ case 2:
+ c->isa_level = MIPS_CPU_ISA_M64;
+ break;
+ default:
+ panic("Unsupported ISA type, cp0.config0.at: %d.", isa);
+ }
+
+ return config0 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config1(struct cpuinfo_mips *c)
+{
+ unsigned int config1;
- /* MIPS32 or MIPS64 compliant CPU. Read Config 1 register. */
- c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
- MIPS_CPU_4KTLB | MIPS_CPU_COUNTER | MIPS_CPU_DIVEC |
- MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
config1 = read_c0_config1();
- if (config1 & (1 << 3))
+
+ if (config1 & MIPS_CONF1_MD)
+ c->ases |= MIPS_ASE_MDMX;
+ if (config1 & MIPS_CONF1_WR)
c->options |= MIPS_CPU_WATCH;
- if (config1 & (1 << 2))
- c->options |= MIPS_CPU_MIPS16;
- if (config1 & (1 << 1))
+ if (config1 & MIPS_CONF1_CA)
+ c->ases |= MIPS_ASE_MIPS16;
+ if (config1 & MIPS_CONF1_EP)
c->options |= MIPS_CPU_EJTAG;
- if (config1 & 1) {
+ if (config1 & MIPS_CONF1_FP) {
c->options |= MIPS_CPU_FPU;
c->options |= MIPS_CPU_32FPR;
}
+ if (cpu_has_tlb)
+ c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1;
+
+ return config1 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config2(struct cpuinfo_mips *c)
+{
+ unsigned int config2;
+
+ config2 = read_c0_config2();
+
+ if (config2 & MIPS_CONF2_SL)
+ c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+
+ return config2 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config3(struct cpuinfo_mips *c)
+{
+ unsigned int config3;
+
+ config3 = read_c0_config3();
+
+ if (config3 & MIPS_CONF3_SM)
+ c->ases |= MIPS_ASE_SMARTMIPS;
+ if (config3 & MIPS_CONF3_DSP)
+ c->ases |= MIPS_ASE_DSP;
+ if (config3 & MIPS_CONF3_VINT)
+ c->options |= MIPS_CPU_VINT;
+ if (config3 & MIPS_CONF3_VEIC)
+ c->options |= MIPS_CPU_VEIC;
+ if (config3 & MIPS_CONF3_MT)
+ c->ases |= MIPS_ASE_MIPSMT;
+
+ return config3 & MIPS_CONF_M;
+}
+
+static inline void decode_configs(struct cpuinfo_mips *c)
+{
+ /* MIPS32 or MIPS64 compliant CPU. */
+ c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
+ MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
+
c->scache.flags = MIPS_CACHE_NOT_PRESENT;
- c->tlbsize = ((config1 >> 25) & 0x3f) + 1;
+ /* Read Config registers. */
+ if (!decode_config0(c))
+ return; /* actually worth a panic() */
+ if (!decode_config1(c))
+ return;
+ if (!decode_config2(c))
+ return;
+ if (!decode_config3(c))
+ return;
}
static inline void cpu_probe_mips(struct cpuinfo_mips *c)
{
- decode_config1(c);
+ decode_configs(c);
switch (c->processor_id & 0xff00) {
case PRID_IMP_4KC:
c->cputype = CPU_4KC;
- c->isa_level = MIPS_CPU_ISA_M32;
break;
case PRID_IMP_4KEC:
c->cputype = CPU_4KEC;
- c->isa_level = MIPS_CPU_ISA_M32;
+ break;
+ case PRID_IMP_4KECR2:
+ c->cputype = CPU_4KEC;
break;
case PRID_IMP_4KSC:
+ case PRID_IMP_4KSD:
c->cputype = CPU_4KSC;
- c->isa_level = MIPS_CPU_ISA_M32;
break;
case PRID_IMP_5KC:
c->cputype = CPU_5KC;
- c->isa_level = MIPS_CPU_ISA_M64;
break;
case PRID_IMP_20KC:
c->cputype = CPU_20KC;
- c->isa_level = MIPS_CPU_ISA_M64;
break;
case PRID_IMP_24K:
+ case PRID_IMP_24KE:
c->cputype = CPU_24K;
- c->isa_level = MIPS_CPU_ISA_M32;
break;
case PRID_IMP_25KF:
c->cputype = CPU_25KF;
- c->isa_level = MIPS_CPU_ISA_M64;
/* Probe for L2 cache */
c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
break;
+ case PRID_IMP_34K:
+ c->cputype = CPU_34K;
+ c->isa_level = MIPS_CPU_ISA_M32;
+ break;
}
}
static inline void cpu_probe_alchemy(struct cpuinfo_mips *c)
{
- decode_config1(c);
+ decode_configs(c);
switch (c->processor_id & 0xff00) {
case PRID_IMP_AU1_REV1:
case PRID_IMP_AU1_REV2:
@@ -505,50 +592,70 @@ static inline void cpu_probe_alchemy(struct cpuinfo_mips *c)
case 3:
c->cputype = CPU_AU1550;
break;
+ case 4:
+ c->cputype = CPU_AU1200;
+ break;
default:
panic("Unknown Au Core!");
break;
}
- c->isa_level = MIPS_CPU_ISA_M32;
break;
}
}
static inline void cpu_probe_sibyte(struct cpuinfo_mips *c)
{
- decode_config1(c);
+ decode_configs(c);
+
+ /*
+ * For historical reasons the SB1 comes with it's own variant of
+ * cache code which eventually will be folded into c-r4k.c. Until
+ * then we pretend it's got it's own cache architecture.
+ */
+ c->options &= ~MIPS_CPU_4K_CACHE;
+ c->options |= MIPS_CPU_SB1_CACHE;
+
switch (c->processor_id & 0xff00) {
case PRID_IMP_SB1:
c->cputype = CPU_SB1;
- c->isa_level = MIPS_CPU_ISA_M64;
- c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
- MIPS_CPU_COUNTER | MIPS_CPU_DIVEC |
- MIPS_CPU_MCHECK | MIPS_CPU_EJTAG |
- MIPS_CPU_WATCH | MIPS_CPU_LLSC;
-#ifndef CONFIG_SB1_PASS_1_WORKAROUNDS
+#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
/* FPU in pass1 is known to have issues. */
- c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR;
+ c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR);
#endif
break;
+ case PRID_IMP_SB1A:
+ c->cputype = CPU_SB1A;
+ break;
}
}
static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c)
{
- decode_config1(c);
+ decode_configs(c);
switch (c->processor_id & 0xff00) {
case PRID_IMP_SR71000:
c->cputype = CPU_SR71000;
- c->isa_level = MIPS_CPU_ISA_M64;
- c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
- MIPS_CPU_4KTLB | MIPS_CPU_FPU |
- MIPS_CPU_COUNTER | MIPS_CPU_MCHECK;
c->scache.ways = 8;
c->tlbsize = 64;
break;
}
}
+static inline void cpu_probe_philips(struct cpuinfo_mips *c)
+{
+ decode_configs(c);
+ switch (c->processor_id & 0xff00) {
+ case PRID_IMP_PR4450:
+ c->cputype = CPU_PR4450;
+ c->isa_level = MIPS_CPU_ISA_M32;
+ break;
+ default:
+ panic("Unknown Philips Core!"); /* REVISIT: die? */
+ break;
+ }
+}
+
+
__init void cpu_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -571,15 +678,24 @@ __init void cpu_probe(void)
case PRID_COMP_SIBYTE:
cpu_probe_sibyte(c);
break;
-
case PRID_COMP_SANDCRAFT:
cpu_probe_sandcraft(c);
break;
+ case PRID_COMP_PHILIPS:
+ cpu_probe_philips(c);
+ break;
default:
c->cputype = CPU_UNKNOWN;
}
- if (c->options & MIPS_CPU_FPU)
+ if (c->options & MIPS_CPU_FPU) {
c->fpu_id = cpu_get_fpu_id();
+
+ if (c->isa_level == MIPS_CPU_ISA_M32 ||
+ c->isa_level == MIPS_CPU_ISA_M64) {
+ if (c->fpu_id & MIPS_FPIR_3D)
+ c->ases |= MIPS_ASE_MIPS3D;
+ }
+ }
}
__init void cpu_report(void)
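
The decode_config0() through decode_config3() helpers above all return the register's M bit, the architectural flag saying whether the next Config register in the chain exists, so decode_configs() stops at the first register that reports no successor. A minimal sketch of just that check (the macro name is illustrative; it mirrors MIPS_CONF_M):

	#define CONF_M_BIT	(1u << 31)	/* Config[n].M: Config[n+1] implemented */

	static inline int next_config_register_present(unsigned int config)
	{
		return (config & CONF_M_BIT) != 0;
	}
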
diff --git a/arch/mips/kernel/dma-no-isa.c b/arch/mips/kernel/dma-no-isa.c
new file mode 100644
index 000000000000..6df8b07741e3
--- /dev/null
+++ b/arch/mips/kernel/dma-no-isa.c
@@ -0,0 +1,28 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 by Ralf Baechle
+ *
+ * Dummy ISA DMA functions for systems that don't have ISA but share drivers
+ * with ISA such as legacy free PCI.
+ */
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+DEFINE_SPINLOCK(dma_spin_lock);
+
+int request_dma(unsigned int dmanr, const char * device_id)
+{
+ return -EINVAL;
+}
+
+void free_dma(unsigned int dmanr)
+{
+}
+
+EXPORT_SYMBOL(dma_spin_lock);
+EXPORT_SYMBOL(request_dma);
+EXPORT_SYMBOL(free_dma);
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 5eb429137e06..83c87fe4ee4f 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -19,11 +19,11 @@
#include <asm/war.h>
#ifdef CONFIG_PREEMPT
- .macro preempt_stop reg=t0
+ .macro preempt_stop
.endm
#else
- .macro preempt_stop reg=t0
- local_irq_disable \reg
+ .macro preempt_stop
+ local_irq_disable
.endm
#define resume_kernel restore_all
#endif
@@ -37,17 +37,18 @@ FEXPORT(ret_from_irq)
andi t0, t0, KU_USER
beqz t0, resume_kernel
-FEXPORT(resume_userspace)
- local_irq_disable t0 # make sure we dont miss an
+resume_userspace:
+ local_irq_disable # make sure we dont miss an
# interrupt setting need_resched
# between sampling and return
LONG_L a2, TI_FLAGS($28) # current->work
- andi a2, _TIF_WORK_MASK # (ignoring syscall_trace)
- bnez a2, work_pending
+ andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace)
+ bnez t0, work_pending
j restore_all
#ifdef CONFIG_PREEMPT
-ENTRY(resume_kernel)
+resume_kernel:
+ local_irq_disable
lw t0, TI_PRE_COUNT($28)
bnez t0, restore_all
need_resched:
@@ -57,12 +58,7 @@ need_resched:
LONG_L t0, PT_STATUS(sp) # Interrupts off?
andi t0, 1
beqz t0, restore_all
- li t0, PREEMPT_ACTIVE
- sw t0, TI_PRE_COUNT($28)
- local_irq_enable t0
- jal schedule
- sw zero, TI_PRE_COUNT($28)
- local_irq_disable t0
+ jal preempt_schedule_irq
b need_resched
#endif
@@ -88,13 +84,13 @@ FEXPORT(restore_partial) # restore partial frame
RESTORE_SP_AND_RET
.set at
-FEXPORT(work_pending)
- andi t0, a2, _TIF_NEED_RESCHED
+work_pending:
+ andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
beqz t0, work_notifysig
work_resched:
jal schedule
- local_irq_disable t0 # make sure need_resched and
+ local_irq_disable # make sure need_resched and
# signals dont change between
# sampling and return
LONG_L a2, TI_FLAGS($28)
@@ -109,15 +105,14 @@ work_notifysig: # deal with pending signals and
move a0, sp
li a1, 0
jal do_notify_resume # a2 already loaded
- j restore_all
+ j resume_userspace
FEXPORT(syscall_exit_work_partial)
SAVE_STATIC
-FEXPORT(syscall_exit_work)
- LONG_L t0, TI_FLAGS($28)
- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
- and t0, t1
- beqz t0, work_pending # trace bit is set
+syscall_exit_work:
+ li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
+ and t0, a2 # a2 is preloaded with TI_FLAGS
+ beqz t0, work_pending # trace bit set?
local_irq_enable # could let do_syscall_trace()
# call schedule() instead
move a0, sp
@@ -128,28 +123,25 @@ FEXPORT(syscall_exit_work)
/*
* Common spurious interrupt handler.
*/
- .text
- .align 5
LEAF(spurious_interrupt)
/*
* Someone tried to fool us by sending an interrupt but we
* couldn't find a cause for it.
*/
+ PTR_LA t1, irq_err_count
#ifdef CONFIG_SMP
- lui t1, %hi(irq_err_count)
-1: ll t0, %lo(irq_err_count)(t1)
+1: ll t0, (t1)
addiu t0, 1
- sc t0, %lo(irq_err_count)(t1)
+ sc t0, (t1)
#if R10000_LLSC_WAR
beqzl t0, 1b
#else
beqz t0, 1b
#endif
#else
- lui t1, %hi(irq_err_count)
- lw t0, %lo(irq_err_count)(t1)
+ lw t0, (t1)
addiu t0, 1
- sw t0, %lo(irq_err_count)(t1)
+ sw t0, (t1)
#endif
j ret_from_irq
END(spurious_interrupt)
diff --git a/arch/mips/kernel/gdb-low.S b/arch/mips/kernel/gdb-low.S
index 512bedbfa7b9..83b8986f9401 100644
--- a/arch/mips/kernel/gdb-low.S
+++ b/arch/mips/kernel/gdb-low.S
@@ -52,16 +52,15 @@
/*
* Called from user mode, go somewhere else.
*/
- lui k1, %hi(saved_vectors)
mfc0 k0, CP0_CAUSE
andi k0, k0, 0x7c
add k1, k1, k0
- lw k0, %lo(saved_vectors)(k1)
+ PTR_L k0, saved_vectors(k1)
jr k0
nop
1:
move k0, sp
- subu sp, k1, GDB_FR_SIZE*2 # see comment above
+ PTR_SUBU sp, k1, GDB_FR_SIZE*2 # see comment above
LONG_S k0, GDB_FR_REG29(sp)
LONG_S $2, GDB_FR_REG2(sp)
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index d3fd1ab14274..96d18c43dca0 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -176,8 +176,10 @@ int kgdb_enabled;
/*
* spin locks for smp case
*/
-static spinlock_t kgdb_lock = SPIN_LOCK_UNLOCKED;
-static spinlock_t kgdb_cpulock[NR_CPUS] = { [0 ... NR_CPUS-1] = SPIN_LOCK_UNLOCKED};
+static DEFINE_SPINLOCK(kgdb_lock);
+static raw_spinlock_t kgdb_cpulock[NR_CPUS] = {
+ [0 ... NR_CPUS-1] = __RAW_SPIN_LOCK_UNLOCKED;
+};
/*
* BUFMAX defines the maximum number of characters in inbound/outbound buffers
@@ -637,29 +639,32 @@ static struct gdb_bp_save async_bp;
* and only one can be active at a time.
*/
extern spinlock_t smp_call_lock;
+
void set_async_breakpoint(unsigned long *epc)
{
/* skip breaking into userland */
if ((*epc & 0x80000000) == 0)
return;
+#ifdef CONFIG_SMP
/* avoid deadlock if someone is make IPC */
if (spin_is_locked(&smp_call_lock))
return;
+#endif
async_bp.addr = *epc;
*epc = (unsigned long)async_breakpoint;
}
-void kgdb_wait(void *arg)
+static void kgdb_wait(void *arg)
{
unsigned flags;
int cpu = smp_processor_id();
local_irq_save(flags);
- spin_lock(&kgdb_cpulock[cpu]);
- spin_unlock(&kgdb_cpulock[cpu]);
+ __raw_spin_lock(&kgdb_cpulock[cpu]);
+ __raw_spin_unlock(&kgdb_cpulock[cpu]);
local_irq_restore(flags);
}
@@ -707,7 +712,7 @@ void handle_exception (struct gdb_regs *regs)
* acquire the CPU spinlocks
*/
for (i = num_online_cpus()-1; i >= 0; i--)
- if (spin_trylock(&kgdb_cpulock[i]) == 0)
+ if (__raw_spin_trylock(&kgdb_cpulock[i]) == 0)
panic("kgdb: couldn't get cpulock %d\n", i);
/*
@@ -982,7 +987,7 @@ finish_kgdb:
exit_kgdb_exception:
/* release locks so other CPUs can go */
for (i = num_online_cpus()-1; i >= 0; i--)
- spin_unlock(&kgdb_cpulock[i]);
+ __raw_spin_unlock(&kgdb_cpulock[i]);
spin_unlock(&kgdb_lock);
__flush_cache_all();
@@ -1036,12 +1041,12 @@ void adel(void)
* malloc is needed by gdb client in "call func()", even a private one
* will make gdb happy
*/
-static void *malloc(size_t size)
+static void * __attribute_used__ malloc(size_t size)
{
return kmalloc(size, GFP_ATOMIC);
}
-static void free(void *where)
+static void __attribute_used__ free (void *where)
{
kfree(where);
}
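
The locking changes above move from the old static SPIN_LOCK_UNLOCKED initializer to DEFINE_SPINLOCK() and, for the per-CPU locks, to raw_spinlock_t taken with the __raw_spin_* primitives. A minimal sketch of the newer declaration-plus-use style for an ordinary spinlock (the lock and function names here are hypothetical):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);

	static void example_update(void)
	{
		unsigned long flags;

		/* Safe against both other CPUs and local interrupts. */
		spin_lock_irqsave(&example_lock, flags);
		/* ... touch the shared state here ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}
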
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index e7f6c1b90806..aa18a8b7b380 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -82,7 +82,7 @@ NESTED(except_vec3_r4000, 0, sp)
li k0, 14<<2
beq k1, k0, handle_vcei
#ifdef CONFIG_64BIT
- dsll k1, k1, 1
+ dsll k1, k1, 1
#endif
.set pop
PTR_L k0, exception_handlers(k1)
@@ -90,17 +90,17 @@ NESTED(except_vec3_r4000, 0, sp)
/*
* Big shit, we now may have two dirty primary cache lines for the same
- * physical address. We can savely invalidate the line pointed to by
+ * physical address. We can safely invalidate the line pointed to by
* c0_badvaddr because after return from this exception handler the
* load / store will be re-executed.
*/
handle_vced:
- DMFC0 k0, CP0_BADVADDR
+ MFC0 k0, CP0_BADVADDR
li k1, -4 # Is this ...
and k0, k1 # ... really needed?
mtc0 zero, CP0_TAGLO
- cache Index_Store_Tag_D,(k0)
- cache Hit_Writeback_Inv_SD,(k0)
+ cache Index_Store_Tag_D, (k0)
+ cache Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
PTR_LA k0, vced_count
lw k1, (k0)
@@ -148,6 +148,38 @@ NESTED(except_vec_ejtag_debug, 0, sp)
__FINIT
/*
+ * Vectored interrupt handler.
+ * This prototype is copied to ebase + n*IntCtl.VS and patched
+ * to invoke the handler
+ */
+NESTED(except_vec_vi, 0, sp)
+ SAVE_SOME
+ SAVE_AT
+ .set push
+ .set noreorder
+EXPORT(except_vec_vi_lui)
+ lui v0, 0 /* Patched */
+ j except_vec_vi_handler
+EXPORT(except_vec_vi_ori)
+ ori v0, 0 /* Patched */
+ .set pop
+ END(except_vec_vi)
+EXPORT(except_vec_vi_end)
+
+/*
+ * Common Vectored Interrupt code
+ * Complete the register saves and invoke the handler which is passed in $v0
+ */
+NESTED(except_vec_vi_handler, 0, sp)
+ SAVE_TEMP
+ SAVE_STATIC
+ CLI
+ move a0, sp
+ jalr v0
+ j ret_from_irq
+ END(except_vec_vi_handler)
+
+/*
* EJTAG debug exception handler.
*/
NESTED(ejtag_debug_handler, PT_SIZE, sp)
@@ -291,6 +323,8 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER mdmx mdmx sti silent /* #22 */
BUILD_HANDLER watch watch sti verbose /* #23 */
BUILD_HANDLER mcheck mcheck cli verbose /* #24 */
+ BUILD_HANDLER mt mt sti verbose /* #25 */
+ BUILD_HANDLER dsp dsp sti silent /* #26 */
BUILD_HANDLER reserved reserved sti verbose /* others */
#ifdef CONFIG_64BIT
diff --git a/arch/mips/kernel/genrtc.c b/arch/mips/kernel/genrtc.c
deleted file mode 100644
index 71416e7bbbaa..000000000000
--- a/arch/mips/kernel/genrtc.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * A glue layer that provides RTC read/write to drivers/char/genrtc.c driver
- * based on MIPS internal RTC routines. It does take care locking
- * issues so that we are SMP/Preemption safe.
- *
- * Copyright (C) 2004 MontaVista Software Inc.
- * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- *
- * Please read the COPYING file for all license details.
- */
-
-#include <linux/spinlock.h>
-
-#include <asm/rtc.h>
-#include <asm/time.h>
-
-static DEFINE_SPINLOCK(mips_rtc_lock);
-
-unsigned int get_rtc_time(struct rtc_time *time)
-{
- unsigned long nowtime;
-
- spin_lock(&mips_rtc_lock);
- nowtime = rtc_get_time();
- to_tm(nowtime, time);
- time->tm_year -= 1900;
- spin_unlock(&mips_rtc_lock);
-
- return RTC_24H;
-}
-
-int set_rtc_time(struct rtc_time *time)
-{
- unsigned long nowtime;
- int ret;
-
- spin_lock(&mips_rtc_lock);
- nowtime = mktime(time->tm_year+1900, time->tm_mon+1,
- time->tm_mday, time->tm_hour, time->tm_min,
- time->tm_sec);
- ret = rtc_set_time(nowtime);
- spin_unlock(&mips_rtc_lock);
-
- return ret;
-}
-
-unsigned int get_rtc_ss(void)
-{
- struct rtc_time h;
-
- get_rtc_time(&h);
- return h.tm_sec;
-}
-
-int get_rtc_pll(struct rtc_pll_info *pll)
-{
- return -EINVAL;
-}
-
-int set_rtc_pll(struct rtc_pll_info *pll)
-{
- return -EINVAL;
-}
-
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 2a1b45d66f04..2e9122a4213a 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -22,11 +22,8 @@
#include <asm/page.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
-#ifdef CONFIG_SGI_IP27
-#include <asm/sn/addrs.h>
-#include <asm/sn/sn0/hubni.h>
-#include <asm/sn/klkernvars.h>
-#endif
+
+#include <kernel-entry-init.h>
.macro ARC64_TWIDDLE_PC
#if defined(CONFIG_ARC64) || defined(CONFIG_MAPPED_KERNEL)
@@ -38,18 +35,6 @@
#endif
.endm
-#ifdef CONFIG_SGI_IP27
- /*
- * outputs the local nasid into res. IP27 stuff.
- */
- .macro GET_NASID_ASM res
- dli \res, LOCAL_HUB_ADDR(NI_STATUS_REV_ID)
- ld \res, (\res)
- and \res, NSRI_NODEID_MASK
- dsrl \res, NSRI_NODEID_SHFT
- .endm
-#endif /* CONFIG_SGI_IP27 */
-
/*
* inputs are the text nasid in t1, data nasid in t2.
*/
@@ -131,16 +116,21 @@
EXPORT(stext) # used for profiling
EXPORT(_stext)
+#if defined(CONFIG_QEMU) || defined(CONFIG_MIPS_SIM)
+ /*
+ * Give us a fighting chance of running if execution beings at the
+ * kernel load address. This is needed because this platform does
+ * not have a ELF loader yet.
+ */
+ j kernel_entry
+#endif
__INIT
NESTED(kernel_entry, 16, sp) # kernel entry point
- setup_c0_status_pri
-#ifdef CONFIG_SGI_IP27
- GET_NASID_ASM t1
- move t2, t1 # text and data are here
- MAPPED_KERNEL_SETUP_TLB
-#endif /* IP27 */
+ kernel_entry_setup # cpu specific setup
+
+ setup_c0_status_pri
ARC64_TWIDDLE_PC
@@ -157,6 +147,7 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
LONG_S a2, fw_arg2
LONG_S a3, fw_arg3
+ MTC0 zero, CP0_CONTEXT # clear context register
PTR_LA $28, init_thread_union
PTR_ADDIU sp, $28, _THREAD_SIZE - 32
set_saved_sp sp, t0, t1
@@ -165,6 +156,10 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
j start_kernel
END(kernel_entry)
+#ifdef CONFIG_QEMU
+ __INIT
+#endif
+
#ifdef CONFIG_SMP
/*
* SMP slave cpus entry point. Board specific code for bootstrap calls this
@@ -172,20 +167,7 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
*/
NESTED(smp_bootstrap, 16, sp)
setup_c0_status_sec
-
-#ifdef CONFIG_SGI_IP27
- GET_NASID_ASM t1
- dli t0, KLDIR_OFFSET + (KLI_KERN_VARS * KLDIR_ENT_SIZE) + \
- KLDIR_OFF_POINTER + CAC_BASE
- dsll t1, NASID_SHFT
- or t0, t0, t1
- ld t0, 0(t0) # t0 points to kern_vars struct
- lh t1, KV_RO_NASID_OFFSET(t0)
- lh t2, KV_RW_NASID_OFFSET(t0)
- MAPPED_KERNEL_SETUP_TLB
- ARC64_TWIDDLE_PC
-#endif /* CONFIG_SGI_IP27 */
-
+ smp_slave_setup
j start_secondary
END(smp_bootstrap)
#endif /* CONFIG_SMP */
@@ -200,19 +182,13 @@ NESTED(smp_bootstrap, 16, sp)
.comm fw_arg2, SZREG, SZREG
.comm fw_arg3, SZREG, SZREG
- .macro page name, order=0
- .globl \name
-\name: .size \name, (_PAGE_SIZE << \order)
- .org . + (_PAGE_SIZE << \order)
- .type \name, @object
+ .macro page name, order
+ .comm \name, (_PAGE_SIZE << \order), (_PAGE_SIZE << \order)
.endm
- .data
- .align PAGE_SHIFT
-
/*
- * ... but on 64-bit we've got three-level pagetables with a
- * slightly different layout ...
+ * On 64-bit we've got three-level pagetables with a slightly
+ * different layout ...
*/
page swapper_pg_dir, _PGD_ORDER
#ifdef CONFIG_64BIT
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 447759201d1d..b974ac9057f6 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -31,7 +31,7 @@ void disable_8259A_irq(unsigned int irq);
* moves to arch independent land
*/
-spinlock_t DEFINE_SPINLOCK(i8259A_lock);
+DEFINE_SPINLOCK(i8259A_lock);
static void end_8259A_irq (unsigned int irq)
{
@@ -52,14 +52,13 @@ static unsigned int startup_8259A_irq(unsigned int irq)
}
static struct hw_interrupt_type i8259A_irq_type = {
- "XT-PIC",
- startup_8259A_irq,
- shutdown_8259A_irq,
- enable_8259A_irq,
- disable_8259A_irq,
- mask_and_ack_8259A,
- end_8259A_irq,
- NULL
+ .typename = "XT-PIC",
+ .startup = startup_8259A_irq,
+ .shutdown = shutdown_8259A_irq,
+ .enable = enable_8259A_irq,
+ .disable = disable_8259A_irq,
+ .ack = mask_and_ack_8259A,
+ .end = end_8259A_irq,
};
/*
@@ -308,7 +307,7 @@ static struct resource pic2_io_resource = {
/*
* On systems with i8259-style interrupt controllers we assume for
- * driver compatibility reasons interrupts 0 - 15 to be the i8295
+ * driver compatibility reasons interrupts 0 - 15 to be the i8259
* interrupts even if the hardware uses a different interrupt numbering.
*/
void __init init_i8259_irqs (void)
@@ -322,7 +321,7 @@ void __init init_i8259_irqs (void)
for (i = 0; i < 16; i++) {
irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = 0;
+ irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
irq_desc[i].handler = &i8259A_irq_type;
}
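
The hw_interrupt_type change above is a straight conversion from a positional initializer to C99 designated initializers, which stays correct if fields are added or reordered and lets unnamed members default to zero. A generic illustration (the struct here is made up):

	struct example_ops {
		const char *typename;
		unsigned int (*startup)(unsigned int irq);
		void (*shutdown)(unsigned int irq);
		void (*end)(unsigned int irq);
	};

	static struct example_ops example_irq_type = {
		.typename	= "example-PIC",
		.end		= NULL,		/* members not named default to zero */
	};
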
diff --git a/arch/mips/kernel/ioctl32.c b/arch/mips/kernel/ioctl32.c
index c069719ff0d8..ed9b2da510be 100644
--- a/arch/mips/kernel/ioctl32.c
+++ b/arch/mips/kernel/ioctl32.c
@@ -41,12 +41,6 @@ IOCTL_TABLE_START
#define DECLARES
#include "compat_ioctl.c"
-#ifdef CONFIG_SIBYTE_TBPROF
-COMPATIBLE_IOCTL(SBPROF_ZBSTART)
-COMPATIBLE_IOCTL(SBPROF_ZBSTOP)
-COMPATIBLE_IOCTL(SBPROF_ZBWAITFULL)
-#endif /* CONFIG_SIBYTE_TBPROF */
-
/*HANDLE_IOCTL(RTC_IRQP_READ, w_long)
COMPATIBLE_IOCTL(RTC_IRQP_SET)
HANDLE_IOCTL(RTC_EPOCH_READ, w_long)
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index 4af20cd91f9f..10d3644e3608 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -8,7 +8,7 @@
*
* Copyright (C) 1993 - 1994 Eric Youngdale <ericy@cais.com>
* Copyright (C) 1996 - 2004 David S. Miller <dm@engr.sgi.com>
- * Copyright (C) 2004 Steven J. Hill <sjhill@realitydiluted.com>
+ * Copyright (C) 2004 - 2005 Steven J. Hill <sjhill@realitydiluted.com>
*/
#include <linux/module.h>
#include <linux/fs.h>
@@ -31,15 +31,16 @@
#include <linux/elfcore.h>
#include <linux/smp_lock.h>
-#include <asm/uaccess.h>
#include <asm/mipsregs.h>
+#include <asm/namei.h>
#include <asm/prctl.h>
+#include <asm/uaccess.h>
#define DLINFO_ITEMS 12
#include <linux/elf.h>
-#undef DEBUG_ELF
+#undef DEBUG
static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_irix_library(struct file *);
@@ -55,7 +56,7 @@ static struct linux_binfmt irix_format = {
#define elf_addr_t unsigned long
#endif
-#ifdef DEBUG_ELF
+#ifdef DEBUG
/* Debugging routines. */
static char *get_elf_p_type(Elf32_Word p_type)
{
@@ -120,7 +121,7 @@ static void dump_phdrs(struct elf_phdr *ep, int pnum)
print_phdr(i, ep);
}
}
-#endif /* (DEBUG_ELF) */
+#endif /* DEBUG */
static void set_brk(unsigned long start, unsigned long end)
{
@@ -146,20 +147,20 @@ static void padzero(unsigned long elf_bss)
nbyte = elf_bss & (PAGE_SIZE-1);
if (nbyte) {
nbyte = PAGE_SIZE - nbyte;
- clear_user((void *) elf_bss, nbyte);
+ clear_user((void __user *) elf_bss, nbyte);
}
}
-unsigned long * create_irix_tables(char * p, int argc, int envc,
- struct elfhdr * exec, unsigned int load_addr,
- unsigned int interp_load_addr,
- struct pt_regs *regs, struct elf_phdr *ephdr)
+static unsigned long * create_irix_tables(char * p, int argc, int envc,
+ struct elfhdr * exec, unsigned int load_addr,
+ unsigned int interp_load_addr, struct pt_regs *regs,
+ struct elf_phdr *ephdr)
{
elf_addr_t *argv;
elf_addr_t *envp;
elf_addr_t *sp, *csp;
-#ifdef DEBUG_ELF
+#ifdef DEBUG
printk("create_irix_tables: p[%p] argc[%d] envc[%d] "
"load_addr[%08x] interp_load_addr[%08x]\n",
p, argc, envc, load_addr, interp_load_addr);
@@ -248,14 +249,13 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
last_bss = 0;
error = load_addr = 0;
-#ifdef DEBUG_ELF
+#ifdef DEBUG
print_elfhdr(interp_elf_ex);
#endif
/* First of all, some simple consistency checks */
if ((interp_elf_ex->e_type != ET_EXEC &&
interp_elf_ex->e_type != ET_DYN) ||
- !irix_elf_check_arch(interp_elf_ex) ||
!interpreter->f_op->mmap) {
printk("IRIX interp has bad e_type %d\n", interp_elf_ex->e_type);
return 0xffffffff;
@@ -290,7 +290,7 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
(char *) elf_phdata,
sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
-#ifdef DEBUG_ELF
+#ifdef DEBUG
dump_phdrs(elf_phdata, interp_elf_ex->e_phnum);
#endif
@@ -306,13 +306,11 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
elf_type |= MAP_FIXED;
vaddr = eppnt->p_vaddr;
-#ifdef DEBUG_ELF
- printk("INTERP do_mmap(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ",
+ pr_debug("INTERP do_mmap(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ",
interpreter, vaddr,
(unsigned long) (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)),
(unsigned long) elf_prot, (unsigned long) elf_type,
(unsigned long) (eppnt->p_offset & 0xfffff000));
-#endif
down_write(&current->mm->mmap_sem);
error = do_mmap(interpreter, vaddr,
eppnt->p_filesz + (eppnt->p_vaddr & 0xfff),
@@ -324,14 +322,10 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
printk("Aieee IRIX interp mmap error=%d\n", error);
break; /* Real error */
}
-#ifdef DEBUG_ELF
- printk("error=%08lx ", (unsigned long) error);
-#endif
+ pr_debug("error=%08lx ", (unsigned long) error);
if(!load_addr && interp_elf_ex->e_type == ET_DYN) {
load_addr = error;
-#ifdef DEBUG_ELF
- printk("load_addr = error ");
-#endif
+ pr_debug("load_addr = error ");
}
/* Find the end of the file mapping for this phdr, and keep
@@ -345,17 +339,13 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
*/
k = eppnt->p_memsz + eppnt->p_vaddr;
if(k > last_bss) last_bss = k;
-#ifdef DEBUG_ELF
- printk("\n");
-#endif
+ pr_debug("\n");
}
}
/* Now use mmap to map the library into memory. */
if(error < 0 && error > -1024) {
-#ifdef DEBUG_ELF
- printk("got error %d\n", error);
-#endif
+ pr_debug("got error %d\n", error);
kfree(elf_phdata);
return 0xffffffff;
}
@@ -365,16 +355,12 @@ static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
* that there are zero-mapped pages up to and including the
* last bss page.
*/
-#ifdef DEBUG_ELF
- printk("padzero(%08lx) ", (unsigned long) (elf_bss));
-#endif
+ pr_debug("padzero(%08lx) ", (unsigned long) (elf_bss));
padzero(elf_bss);
len = (elf_bss + 0xfff) & 0xfffff000; /* What we have mapped so far */
-#ifdef DEBUG_ELF
- printk("last_bss[%08lx] len[%08lx]\n", (unsigned long) last_bss,
- (unsigned long) len);
-#endif
+ pr_debug("last_bss[%08lx] len[%08lx]\n", (unsigned long) last_bss,
+ (unsigned long) len);
/* Map the last of the bss segment */
if (last_bss > len) {
@@ -396,12 +382,7 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
/* First of all, some simple consistency checks */
if((ehp->e_type != ET_EXEC && ehp->e_type != ET_DYN) ||
- !irix_elf_check_arch(ehp) || !bprm->file->f_op->mmap) {
- return -ENOEXEC;
- }
-
- /* Only support MIPS ARCH2 or greater IRIX binaries for now. */
- if(!(ehp->e_flags & EF_MIPS_ARCH) && !(ehp->e_flags & 0x04)) {
+ !bprm->file->f_op->mmap) {
return -ENOEXEC;
}
@@ -411,16 +392,17 @@ static int verify_binary(struct elfhdr *ehp, struct linux_binprm *bprm)
* XXX all registers as 64bits on cpu's capable of this at
* XXX exception time plus frob the XTLB exception vector.
*/
- if((ehp->e_flags & 0x20)) {
+ if((ehp->e_flags & EF_MIPS_ABI2))
return -ENOEXEC;
- }
- return 0; /* It's ok. */
+ return 0;
}
-#define IRIX_INTERP_PREFIX "/usr/gnemul/irix"
-
-/* Look for an IRIX ELF interpreter. */
+/*
+ * This is where the detailed check is performed. Irix binaries
+ * use interpreters with 'libc.so' in the name, so this function
+ * can differentiate between Linux and Irix binaries.
+ */
static inline int look_for_irix_interpreter(char **name,
struct file **interpreter,
struct elfhdr *interp_elf_ex,
@@ -440,12 +422,11 @@ static inline int look_for_irix_interpreter(char **name,
if (*name != NULL)
goto out;
- *name = kmalloc((epp->p_filesz + strlen(IRIX_INTERP_PREFIX)),
- GFP_KERNEL);
+ *name = kmalloc(epp->p_filesz + strlen(IRIX_EMUL), GFP_KERNEL);
if (!*name)
return -ENOMEM;
- strcpy(*name, IRIX_INTERP_PREFIX);
+ strcpy(*name, IRIX_EMUL);
retval = kernel_read(bprm->file, epp->p_offset, (*name + 16),
epp->p_filesz);
if (retval < 0)
@@ -562,7 +543,7 @@ static inline int map_interpreter(struct elf_phdr *epp, struct elfhdr *ihp,
* process and the system, here we map the page and fill the
* structure
*/
-void irix_map_prda_page (void)
+static void irix_map_prda_page(void)
{
unsigned long v;
struct prda *pp;
@@ -601,14 +582,33 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
load_addr = 0;
has_interp = has_ephdr = 0;
- elf_ihdr = elf_ephdr = 0;
+ elf_ihdr = elf_ephdr = NULL;
elf_ex = *((struct elfhdr *) bprm->buf);
retval = -ENOEXEC;
if (verify_binary(&elf_ex, bprm))
goto out;
-#ifdef DEBUG_ELF
+ /*
+ * Telling -o32 static binaries from Linux and Irix apart from each
+ * other is difficult. There are 2 differences to be noted for static
+ * binaries from the 2 operating systems:
+ *
+ * 1) Irix binaries have their .text section before their .init
+ * section. Linux binaries are just the opposite.
+ *
+ * 2) Irix binaries usually have <= 12 sections and Linux
+ * binaries have > 20.
+ *
+ * We will use Method #2 since Method #1 would require us to read in
+ * the section headers which is way too much overhead. This appears
+ * to work for everything we have ran into so far. If anyone has a
+ * better method to tell the binaries apart, I'm listening.
+ */
+ if (elf_ex.e_shnum > 20)
+ goto out;
+
+#ifdef DEBUG
print_elfhdr(&elf_ex);
#endif
@@ -623,11 +623,10 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
}
retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *)elf_phdata, size);
-
if (retval < 0)
goto out_free_ph;
-#ifdef DEBUG_ELF
+#ifdef DEBUG
dump_phdrs(elf_phdata, elf_ex.e_phnum);
#endif
@@ -644,9 +643,8 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
break;
};
}
-#ifdef DEBUG_ELF
- printk("\n");
-#endif
+
+ pr_debug("\n");
elf_bss = 0;
elf_brk = 0;
@@ -657,12 +655,19 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
end_code = 0;
end_data = 0;
- retval = look_for_irix_interpreter(&elf_interpreter,
- &interpreter,
+ /*
+ * If we get a return value, we change the value to be ENOEXEC
+ * so that we can exit gracefully and the main binary format
+ * search loop in 'fs/exec.c' will move onto the next handler
+ * which should be the normal ELF binary handler.
+ */
+ retval = look_for_irix_interpreter(&elf_interpreter, &interpreter,
&interp_elf_ex, elf_phdata, bprm,
elf_ex.e_phnum);
- if (retval)
+ if (retval) {
+ retval = -ENOEXEC;
goto out_free_file;
+ }
if (elf_interpreter) {
retval = verify_irix_interpreter(&interp_elf_ex);
@@ -692,7 +697,6 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
/* Do this so that we can load the interpreter, if need be. We will
* change some of these later.
*/
- set_mm_counter(current->mm, rss, 0);
setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
current->mm->start_stack = bprm->p;
@@ -746,18 +750,16 @@ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs)
* IRIX maps a page at 0x200000 which holds some system
* information. Programs depend on this.
*/
- irix_map_prda_page ();
+ irix_map_prda_page();
padzero(elf_bss);
-#ifdef DEBUG_ELF
- printk("(start_brk) %lx\n" , (long) current->mm->start_brk);
- printk("(end_code) %lx\n" , (long) current->mm->end_code);
- printk("(start_code) %lx\n" , (long) current->mm->start_code);
- printk("(end_data) %lx\n" , (long) current->mm->end_data);
- printk("(start_stack) %lx\n" , (long) current->mm->start_stack);
- printk("(brk) %lx\n" , (long) current->mm->brk);
-#endif
+ pr_debug("(start_brk) %lx\n" , (long) current->mm->start_brk);
+ pr_debug("(end_code) %lx\n" , (long) current->mm->end_code);
+ pr_debug("(start_code) %lx\n" , (long) current->mm->start_code);
+ pr_debug("(end_data) %lx\n" , (long) current->mm->end_data);
+ pr_debug("(start_stack) %lx\n" , (long) current->mm->start_stack);
+ pr_debug("(brk) %lx\n" , (long) current->mm->brk);
#if 0 /* XXX No fucking way dude... */
/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
@@ -782,8 +784,7 @@ out_free_dentry:
allow_write_access(interpreter);
fput(interpreter);
out_free_interp:
- if (elf_interpreter)
- kfree(elf_interpreter);
+ kfree(elf_interpreter);
out_free_file:
out_free_ph:
kfree (elf_phdata);
@@ -813,7 +814,7 @@ static int load_irix_library(struct file *file)
/* First of all, some simple consistency checks. */
if(elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
- !irix_elf_check_arch(&elf_ex) || !file->f_op->mmap)
+ !file->f_op->mmap)
return -ENOEXEC;
/* Now read in all of the header information. */
@@ -874,35 +875,36 @@ static int load_irix_library(struct file *file)
* phdrs there are in the USER_PHDRP array. We return the vaddr the
* first phdr was successfully mapped to.
*/
-unsigned long irix_mapelf(int fd, struct elf_phdr *user_phdrp, int cnt)
+unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt)
{
- struct elf_phdr *hp;
+ unsigned long type, vaddr, filesz, offset, flags;
+ struct elf_phdr __user *hp;
struct file *filp;
int i, retval;
-#ifdef DEBUG_ELF
- printk("irix_mapelf: fd[%d] user_phdrp[%p] cnt[%d]\n",
- fd, user_phdrp, cnt);
-#endif
+ pr_debug("irix_mapelf: fd[%d] user_phdrp[%p] cnt[%d]\n",
+ fd, user_phdrp, cnt);
/* First get the verification out of the way. */
hp = user_phdrp;
if (!access_ok(VERIFY_READ, hp, (sizeof(struct elf_phdr) * cnt))) {
-#ifdef DEBUG_ELF
- printk("irix_mapelf: access_ok fails!\n");
-#endif
+ pr_debug("irix_mapelf: bad pointer to ELF PHDR!\n");
+
return -EFAULT;
}
-#ifdef DEBUG_ELF
+#ifdef DEBUG
dump_phdrs(user_phdrp, cnt);
#endif
- for(i = 0; i < cnt; i++, hp++)
- if(hp->p_type != PT_LOAD) {
+ for (i = 0; i < cnt; i++, hp++) {
+ if (__get_user(type, &hp->p_type))
+ return -EFAULT;
+ if (type != PT_LOAD) {
printk("irix_mapelf: One section is not PT_LOAD!\n");
return -ENOEXEC;
}
+ }
filp = fget(fd);
if (!filp)
@@ -917,29 +919,40 @@ unsigned long irix_mapelf(int fd, struct elf_phdr *user_phdrp, int cnt)
for(i = 0; i < cnt; i++, hp++) {
int prot;
- prot = (hp->p_flags & PF_R) ? PROT_READ : 0;
- prot |= (hp->p_flags & PF_W) ? PROT_WRITE : 0;
- prot |= (hp->p_flags & PF_X) ? PROT_EXEC : 0;
+ retval = __get_user(vaddr, &hp->p_vaddr);
+ retval |= __get_user(filesz, &hp->p_filesz);
+ retval |= __get_user(offset, &hp->p_offset);
+ retval |= __get_user(flags, &hp->p_flags);
+ if (retval)
+ return retval;
+
+ prot = (flags & PF_R) ? PROT_READ : 0;
+ prot |= (flags & PF_W) ? PROT_WRITE : 0;
+ prot |= (flags & PF_X) ? PROT_EXEC : 0;
+
down_write(&current->mm->mmap_sem);
- retval = do_mmap(filp, (hp->p_vaddr & 0xfffff000),
- (hp->p_filesz + (hp->p_vaddr & 0xfff)),
+ retval = do_mmap(filp, (vaddr & 0xfffff000),
+ (filesz + (vaddr & 0xfff)),
prot, (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
- (hp->p_offset & 0xfffff000));
+ (offset & 0xfffff000));
up_write(&current->mm->mmap_sem);
- if(retval != (hp->p_vaddr & 0xfffff000)) {
+ if (retval != (vaddr & 0xfffff000)) {
printk("irix_mapelf: do_mmap fails with %d!\n", retval);
fput(filp);
return retval;
}
}
-#ifdef DEBUG_ELF
- printk("irix_mapelf: Success, returning %08lx\n",
- (unsigned long) user_phdrp->p_vaddr);
-#endif
+ pr_debug("irix_mapelf: Success, returning %08lx\n",
+ (unsigned long) user_phdrp->p_vaddr);
+
fput(filp);
- return user_phdrp->p_vaddr;
+
+ if (__get_user(vaddr, &user_phdrp->p_vaddr))
+ return -EFAULT;
+
+ return vaddr;
}
/*
@@ -952,9 +965,9 @@ unsigned long irix_mapelf(int fd, struct elf_phdr *user_phdrp, int cnt)
/* These are the only things you should do on a core-file: use only these
* functions to write out all the necessary info.
*/
-static int dump_write(struct file *file, const void *addr, int nr)
+static int dump_write(struct file *file, const void __user *addr, int nr)
{
- return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
+ return file->f_op->write(file, (const char __user *) addr, nr, &file->f_pos) == nr;
}
static int dump_seek(struct file *file, off_t off)
@@ -1064,8 +1077,8 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
struct elfhdr elf;
off_t offset = 0, dataoff;
int limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
- int numnote = 4;
- struct memelfnote notes[4];
+ int numnote = 3;
+ struct memelfnote notes[3];
struct elf_prstatus prstatus; /* NT_PRSTATUS */
elf_fpregset_t fpu; /* NT_PRFPREG */
struct elf_prpsinfo psinfo; /* NT_PRPSINFO */
@@ -1073,7 +1086,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
/* Count what's needed to dump, up to the limit of coredump size. */
segs = 0;
size = 0;
- for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
+ for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
if (maydump(vma))
{
int sz = vma->vm_end-vma->vm_start;
@@ -1187,9 +1200,9 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
len = current->mm->arg_end - current->mm->arg_start;
len = len >= ELF_PRARGSZ ? ELF_PRARGSZ : len;
- copy_from_user(&psinfo.pr_psargs,
- (const char *)current->mm->arg_start, len);
- for(i = 0; i < len; i++)
+ (void *) copy_from_user(&psinfo.pr_psargs,
+ (const char __user *)current->mm->arg_start, len);
+ for (i = 0; i < len; i++)
if (psinfo.pr_psargs[i] == 0)
psinfo.pr_psargs[i] = ' ';
psinfo.pr_psargs[len] = 0;
@@ -1198,20 +1211,15 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
}
strlcpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname));
- notes[2].name = "CORE";
- notes[2].type = NT_TASKSTRUCT;
- notes[2].datasz = sizeof(*current);
- notes[2].data = current;
-
/* Try to dump the FPU. */
prstatus.pr_fpvalid = dump_fpu (regs, &fpu);
if (!prstatus.pr_fpvalid) {
numnote--;
} else {
- notes[3].name = "CORE";
- notes[3].type = NT_PRFPREG;
- notes[3].datasz = sizeof(fpu);
- notes[3].data = &fpu;
+ notes[2].name = "CORE";
+ notes[2].type = NT_PRFPREG;
+ notes[2].datasz = sizeof(fpu);
+ notes[2].data = &fpu;
}
/* Write notes phdr entry. */
@@ -1256,8 +1264,10 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
phdr.p_memsz = sz;
offset += phdr.p_filesz;
phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
- if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
- if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
+ if (vma->vm_flags & VM_WRITE)
+ phdr.p_flags |= PF_W;
+ if (vma->vm_flags & VM_EXEC)
+ phdr.p_flags |= PF_X;
phdr.p_align = PAGE_SIZE;
DUMP_WRITE(&phdr, sizeof(phdr));
@@ -1283,7 +1293,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
#ifdef DEBUG
printk("elf_core_dump: writing %08lx %lx\n", addr, len);
#endif
- DUMP_WRITE((void *)addr, len);
+ DUMP_WRITE((void __user *)addr, len);
}
if ((off_t) file->f_pos != offset) {
@@ -1299,7 +1309,7 @@ end_coredump:
static int __init init_irix_binfmt(void)
{
- int init_inventory(void);
+ extern int init_inventory(void);
extern asmlinkage unsigned long sys_call_table;
extern asmlinkage unsigned long sys_call_table_irix5;
@@ -1318,7 +1328,9 @@ static int __init init_irix_binfmt(void)
static void __exit exit_irix_binfmt(void)
{
- /* Remove the IRIX ELF loaders. */
+ /*
+ * Remove the Irix ELF loader.
+ */
unregister_binfmt(&irix_format);
}
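
The loader above now tells Irix o32 static binaries from Linux ones purely by section count, as the long comment in load_irix_binary() explains (Irix binaries tend to have at most about 12 sections, Linux ones more than 20). A userspace sketch of that heuristic applied to an ELF header already read into memory (illustrative only, not kernel code):

	#include <elf.h>

	/* Returns nonzero if the header passes the Irix section-count test. */
	static int passes_irix_shnum_heuristic(const Elf32_Ehdr *ehdr)
	{
		return ehdr->e_shnum <= 20;
	}
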
diff --git a/arch/mips/kernel/irixinv.c b/arch/mips/kernel/irixinv.c
index 60aa98cd1791..de8584f62311 100644
--- a/arch/mips/kernel/irixinv.c
+++ b/arch/mips/kernel/irixinv.c
@@ -30,10 +30,10 @@ void add_to_inventory (int class, int type, int controller, int unit, int state)
inventory_items++;
}
-int dump_inventory_to_user (void *userbuf, int size)
+int dump_inventory_to_user (void __user *userbuf, int size)
{
inventory_t *inv = &inventory [0];
- inventory_t *user = userbuf;
+ inventory_t __user *user = userbuf;
int v;
if (!access_ok(VERIFY_WRITE, userbuf, size))
@@ -41,7 +41,8 @@ int dump_inventory_to_user (void *userbuf, int size)
for (v = 0; v < inventory_items; v++){
inv = &inventory [v];
- copy_to_user (user, inv, sizeof (inventory_t));
+ if (copy_to_user (user, inv, sizeof (inventory_t)))
+ return -EFAULT;
user++;
}
return inventory_items * sizeof (inventory_t);
diff --git a/arch/mips/kernel/irixioctl.c b/arch/mips/kernel/irixioctl.c
index 3cdc22346f4c..e2863821a3dd 100644
--- a/arch/mips/kernel/irixioctl.c
+++ b/arch/mips/kernel/irixioctl.c
@@ -59,7 +59,7 @@ asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg)
{
struct tty_struct *tp, *rtp;
mm_segment_t old_fs;
- int error = 0;
+ int i, error = 0;
#ifdef DEBUG_IOCTLS
printk("[%s:%d] irix_ioctl(%d, ", current->comm, current->pid, fd);
@@ -74,12 +74,13 @@ asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg)
case 0x0000540d: {
struct termios kt;
- struct irix_termios *it = (struct irix_termios *) arg;
+ struct irix_termios __user *it =
+ (struct irix_termios __user *) arg;
#ifdef DEBUG_IOCTLS
printk("TCGETS, %08lx) ", arg);
#endif
- if(!access_ok(VERIFY_WRITE, it, sizeof(*it))) {
+ if (!access_ok(VERIFY_WRITE, it, sizeof(*it))) {
error = -EFAULT;
break;
}
@@ -88,13 +89,14 @@ asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg)
set_fs(old_fs);
if (error)
break;
- __put_user(kt.c_iflag, &it->c_iflag);
- __put_user(kt.c_oflag, &it->c_oflag);
- __put_user(kt.c_cflag, &it->c_cflag);
- __put_user(kt.c_lflag, &it->c_lflag);
- for(error = 0; error < NCCS; error++)
- __put_user(kt.c_cc[error], &it->c_cc[error]);
- error = 0;
+
+ error = __put_user(kt.c_iflag, &it->c_iflag);
+ error |= __put_user(kt.c_oflag, &it->c_oflag);
+ error |= __put_user(kt.c_cflag, &it->c_cflag);
+ error |= __put_user(kt.c_lflag, &it->c_lflag);
+
+ for (i = 0; i < NCCS; i++)
+ error |= __put_user(kt.c_cc[i], &it->c_cc[i]);
break;
}
@@ -112,14 +114,19 @@ asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg)
old_fs = get_fs(); set_fs(get_ds());
error = sys_ioctl(fd, TCGETS, (unsigned long) &kt);
set_fs(old_fs);
- if(error)
+ if (error)
+ break;
+
+ error = __get_user(kt.c_iflag, &it->c_iflag);
+ error |= __get_user(kt.c_oflag, &it->c_oflag);
+ error |= __get_user(kt.c_cflag, &it->c_cflag);
+ error |= __get_user(kt.c_lflag, &it->c_lflag);
+
+ for (i = 0; i < NCCS; i++)
+ error |= __get_user(kt.c_cc[i], &it->c_cc[i]);
+
+ if (error)
break;
- __get_user(kt.c_iflag, &it->c_iflag);
- __get_user(kt.c_oflag, &it->c_oflag);
- __get_user(kt.c_cflag, &it->c_cflag);
- __get_user(kt.c_lflag, &it->c_lflag);
- for(error = 0; error < NCCS; error++)
- __get_user(kt.c_cc[error], &it->c_cc[error]);
old_fs = get_fs(); set_fs(get_ds());
error = sys_ioctl(fd, TCSETS, (unsigned long) &kt);
set_fs(old_fs);
@@ -153,7 +160,7 @@ asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg)
#ifdef DEBUG_IOCTLS
printk("rtp->session=%d ", rtp->session);
#endif
- error = put_user(rtp->session, (unsigned long *) arg);
+ error = put_user(rtp->session, (unsigned long __user *) arg);
break;
case 0x746e:
@@ -195,50 +202,32 @@ asmlinkage int irix_ioctl(int fd, unsigned long cmd, unsigned long arg)
break;
case 0x8004667e:
-#ifdef DEBUG_IOCTLS
- printk("FIONBIO, %08lx) arg=%d ", arg, *(int *)arg);
-#endif
error = sys_ioctl(fd, FIONBIO, arg);
break;
case 0x80047476:
-#ifdef DEBUG_IOCTLS
- printk("TIOCSPGRP, %08lx) arg=%d ", arg, *(int *)arg);
-#endif
error = sys_ioctl(fd, TIOCSPGRP, arg);
break;
case 0x8020690c:
-#ifdef DEBUG_IOCTLS
- printk("SIOCSIFADDR, %08lx) arg=%d ", arg, *(int *)arg);
-#endif
error = sys_ioctl(fd, SIOCSIFADDR, arg);
break;
case 0x80206910:
-#ifdef DEBUG_IOCTLS
- printk("SIOCSIFFLAGS, %08lx) arg=%d ", arg, *(int *)arg);
-#endif
error = sys_ioctl(fd, SIOCSIFFLAGS, arg);
break;
case 0xc0206911:
-#ifdef DEBUG_IOCTLS
- printk("SIOCGIFFLAGS, %08lx) arg=%d ", arg, *(int *)arg);
-#endif
error = sys_ioctl(fd, SIOCGIFFLAGS, arg);
break;
case 0xc020691b:
-#ifdef DEBUG_IOCTLS
- printk("SIOCGIFMETRIC, %08lx) arg=%d ", arg, *(int *)arg);
-#endif
error = sys_ioctl(fd, SIOCGIFMETRIC, arg);
break;
default: {
#ifdef DEBUG_MISSING_IOCTL
- char *msg = "Unimplemented IOCTL cmd tell linux@engr.sgi.com\n";
+ char *msg = "Unimplemented IOCTL cmd tell linux-mips@linux-mips.org\n";
#ifdef DEBUG_IOCTLS
printk("UNIMP_IOCTL, %08lx)\n", arg);
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index eff89322ba50..908e63684208 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -76,36 +76,39 @@ static inline void dump_irix5_sigctx(struct sigctx_irix5 *c)
}
#endif
-static void setup_irix_frame(struct k_sigaction *ka, struct pt_regs *regs,
- int signr, sigset_t *oldmask)
+static int setup_irix_frame(struct k_sigaction *ka, struct pt_regs *regs,
+ int signr, sigset_t *oldmask)
{
+ struct sigctx_irix5 __user *ctx;
unsigned long sp;
- struct sigctx_irix5 *ctx;
- int i;
+ int error, i;
sp = regs->regs[29];
sp -= sizeof(struct sigctx_irix5);
sp &= ~(0xf);
- ctx = (struct sigctx_irix5 *) sp;
+ ctx = (struct sigctx_irix5 __user *) sp;
if (!access_ok(VERIFY_WRITE, ctx, sizeof(*ctx)))
goto segv_and_exit;
- __put_user(0, &ctx->weird_fpu_thing);
- __put_user(~(0x00000001), &ctx->rmask);
- __put_user(0, &ctx->regs[0]);
+ error = __put_user(0, &ctx->weird_fpu_thing);
+ error |= __put_user(~(0x00000001), &ctx->rmask);
+ error |= __put_user(0, &ctx->regs[0]);
for(i = 1; i < 32; i++)
- __put_user((u64) regs->regs[i], &ctx->regs[i]);
+ error |= __put_user((u64) regs->regs[i], &ctx->regs[i]);
+
+ error |= __put_user((u64) regs->hi, &ctx->hi);
+ error |= __put_user((u64) regs->lo, &ctx->lo);
+ error |= __put_user((u64) regs->cp0_epc, &ctx->pc);
+ error |= __put_user(!!used_math(), &ctx->usedfp);
+ error |= __put_user((u64) regs->cp0_cause, &ctx->cp0_cause);
+ error |= __put_user((u64) regs->cp0_badvaddr, &ctx->cp0_badvaddr);
- __put_user((u64) regs->hi, &ctx->hi);
- __put_user((u64) regs->lo, &ctx->lo);
- __put_user((u64) regs->cp0_epc, &ctx->pc);
- __put_user(!!used_math(), &ctx->usedfp);
- __put_user((u64) regs->cp0_cause, &ctx->cp0_cause);
- __put_user((u64) regs->cp0_badvaddr, &ctx->cp0_badvaddr);
+ error |= __put_user(0, &ctx->sstk_flags); /* XXX sigstack unimp... todo... */
- __put_user(0, &ctx->sstk_flags); /* XXX sigstack unimp... todo... */
+ error |= __copy_to_user(&ctx->sigset, oldmask, sizeof(irix_sigset_t)) ? -EFAULT : 0;
- __copy_to_user(&ctx->sigset, oldmask, sizeof(irix_sigset_t));
+ if (error)
+ goto segv_and_exit;
#ifdef DEBUG_SIG
dump_irix5_sigctx(ctx);
@@ -117,13 +120,14 @@ static void setup_irix_frame(struct k_sigaction *ka, struct pt_regs *regs,
regs->regs[7] = (unsigned long) ka->sa.sa_handler;
regs->regs[25] = regs->cp0_epc = (unsigned long) ka->sa_restorer;
- return;
+ return 1;
segv_and_exit:
force_sigsegv(signr, current);
+ return 0;
}
-static void inline
+static int inline
setup_irix_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
int signr, sigset_t *oldmask, siginfo_t *info)
{
@@ -131,9 +135,11 @@ setup_irix_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
do_exit(SIGSEGV);
}
-static inline void handle_signal(unsigned long sig, siginfo_t *info,
+static inline int handle_signal(unsigned long sig, siginfo_t *info,
struct k_sigaction *ka, sigset_t *oldset, struct pt_regs * regs)
{
+ int ret;
+
switch(regs->regs[0]) {
case ERESTARTNOHAND:
regs->regs[2] = EINTR;
@@ -151,9 +157,9 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info,
regs->regs[0] = 0; /* Don't deal with this again. */
if (ka->sa.sa_flags & SA_SIGINFO)
- setup_irix_rt_frame(ka, regs, sig, oldset, info);
+ ret = setup_irix_rt_frame(ka, regs, sig, oldset, info);
else
- setup_irix_frame(ka, regs, sig, oldset);
+ ret = setup_irix_frame(ka, regs, sig, oldset);
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -161,6 +167,8 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info,
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
+
+ return ret;
}
asmlinkage int do_irix_signal(sigset_t *oldset, struct pt_regs *regs)
@@ -184,10 +192,8 @@ asmlinkage int do_irix_signal(sigset_t *oldset, struct pt_regs *regs)
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- if (signr > 0) {
- handle_signal(signr, &info, &ka, oldset, regs);
- return 1;
- }
+ if (signr > 0)
+ return handle_signal(signr, &info, &ka, oldset, regs);
no_signal:
/*
@@ -208,10 +214,11 @@ no_signal:
asmlinkage void
irix_sigreturn(struct pt_regs *regs)
{
- struct sigctx_irix5 *context, *magic;
+ struct sigctx_irix5 __user *context, *magic;
unsigned long umask, mask;
u64 *fregs;
- int sig, i, base = 0;
+ u32 usedfp;
+ int error, sig, i, base = 0;
sigset_t blocked;
/* Always make any pending restarted system calls return -EINTR */
@@ -220,8 +227,8 @@ irix_sigreturn(struct pt_regs *regs)
if (regs->regs[2] == 1000)
base = 1;
- context = (struct sigctx_irix5 *) regs->regs[base + 4];
- magic = (struct sigctx_irix5 *) regs->regs[base + 5];
+ context = (struct sigctx_irix5 __user *) regs->regs[base + 4];
+ magic = (struct sigctx_irix5 __user *) regs->regs[base + 5];
sig = (int) regs->regs[base + 6];
#ifdef DEBUG_SIG
printk("[%s:%d] IRIX sigreturn(scp[%p],ucp[%p],sig[%d])\n",
@@ -236,25 +243,31 @@ irix_sigreturn(struct pt_regs *regs)
dump_irix5_sigctx(context);
#endif
- __get_user(regs->cp0_epc, &context->pc);
- umask = context->rmask; mask = 2;
+ error = __get_user(regs->cp0_epc, &context->pc);
+ error |= __get_user(umask, &context->rmask);
+
+ mask = 2;
for (i = 1; i < 32; i++, mask <<= 1) {
- if(umask & mask)
- __get_user(regs->regs[i], &context->regs[i]);
+ if (umask & mask)
+ error |= __get_user(regs->regs[i], &context->regs[i]);
}
- __get_user(regs->hi, &context->hi);
- __get_user(regs->lo, &context->lo);
+ error |= __get_user(regs->hi, &context->hi);
+ error |= __get_user(regs->lo, &context->lo);
- if ((umask & 1) && context->usedfp) {
+ error |= __get_user(usedfp, &context->usedfp);
+ if ((umask & 1) && usedfp) {
fregs = (u64 *) &current->thread.fpu;
+
for(i = 0; i < 32; i++)
- fregs[i] = (u64) context->fpregs[i];
- __get_user(current->thread.fpu.hard.fcr31, &context->fpcsr);
+ error |= __get_user(fregs[i], &context->fpregs[i]);
+ error |= __get_user(current->thread.fpu.hard.fcr31, &context->fpcsr);
}
/* XXX do sigstack crapola here... XXX */
- if (__copy_from_user(&blocked, &context->sigset, sizeof(blocked)))
+ error |= __copy_from_user(&blocked, &context->sigset, sizeof(blocked)) ? -EFAULT : 0;
+
+ if (error)
goto badframe;
sigdelsetmask(&blocked, ~_BLOCKABLE);
@@ -296,8 +309,8 @@ static inline void dump_sigact_irix5(struct sigact_irix5 *p)
#endif
asmlinkage int
-irix_sigaction(int sig, const struct sigaction *act,
- struct sigaction *oact, void *trampoline)
+irix_sigaction(int sig, const struct sigaction __user *act,
+ struct sigaction __user *oact, void __user *trampoline)
{
struct k_sigaction new_ka, old_ka;
int ret;
@@ -311,12 +324,16 @@ irix_sigaction(int sig, const struct sigaction *act,
#endif
if (act) {
sigset_t mask;
- if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
- __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka.sa.sa_flags, &act->sa_flags))
+ int err;
+
+ if (!access_ok(VERIFY_READ, act, sizeof(*act)))
return -EFAULT;
+ err = __get_user(new_ka.sa.sa_handler, &act->sa_handler);
+ err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
- __copy_from_user(&mask, &act->sa_mask, sizeof(sigset_t));
+ err |= __copy_from_user(&mask, &act->sa_mask, sizeof(sigset_t)) ? -EFAULT : 0;
+ if (err)
+ return err;
/*
* Hmmm... methinks IRIX libc always passes a valid trampoline
@@ -330,30 +347,37 @@ irix_sigaction(int sig, const struct sigaction *act,
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
- if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
- __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
- __put_user(old_ka.sa.sa_flags, &oact->sa_flags))
+ int err;
+
+ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
+ return -EFAULT;
+
+ err = __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
+ err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ err |= __copy_to_user(&oact->sa_mask, &old_ka.sa.sa_mask,
+ sizeof(sigset_t)) ? -EFAULT : 0;
+ if (err)
return -EFAULT;
- __copy_to_user(&old_ka.sa.sa_mask, &oact->sa_mask,
- sizeof(sigset_t));
}
return ret;
}
-asmlinkage int irix_sigpending(irix_sigset_t *set)
+asmlinkage int irix_sigpending(irix_sigset_t __user *set)
{
return do_sigpending(set, sizeof(*set));
}
-asmlinkage int irix_sigprocmask(int how, irix_sigset_t *new, irix_sigset_t *old)
+asmlinkage int irix_sigprocmask(int how, irix_sigset_t __user *new,
+ irix_sigset_t __user *old)
{
sigset_t oldbits, newbits;
if (new) {
if (!access_ok(VERIFY_READ, new, sizeof(*new)))
return -EFAULT;
- __copy_from_user(&newbits, new, sizeof(unsigned long)*4);
+ if (__copy_from_user(&newbits, new, sizeof(unsigned long)*4))
+ return -EFAULT;
sigdelsetmask(&newbits, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
@@ -381,20 +405,19 @@ asmlinkage int irix_sigprocmask(int how, irix_sigset_t *new, irix_sigset_t *old)
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
- if(old) {
- if (!access_ok(VERIFY_WRITE, old, sizeof(*old)))
- return -EFAULT;
- __copy_to_user(old, &current->blocked, sizeof(unsigned long)*4);
- }
+ if (old)
+ return copy_to_user(old, &current->blocked,
+ sizeof(unsigned long)*4) ? -EFAULT : 0;
return 0;
}
asmlinkage int irix_sigsuspend(struct pt_regs *regs)
{
- sigset_t *uset, saveset, newset;
+ sigset_t saveset, newset;
+ sigset_t __user *uset;
- uset = (sigset_t *) regs->regs[4];
+ uset = (sigset_t __user *) regs->regs[4];
if (copy_from_user(&newset, uset, sizeof(sigset_t)))
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
@@ -440,12 +463,13 @@ struct irix5_siginfo {
} stuff;
};
-asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info,
- struct timespec *tp)
+asmlinkage int irix_sigpoll_sys(unsigned long __user *set,
+ struct irix5_siginfo __user *info, struct timespec __user *tp)
{
long expire = MAX_SCHEDULE_TIMEOUT;
sigset_t kset;
int i, sig, error, timeo = 0;
+ struct timespec ktp;
#ifdef DEBUG_SIG
printk("[%s:%d] irix_sigpoll_sys(%p,%p,%p)\n",
@@ -456,14 +480,8 @@ asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info,
if (!set)
return -EINVAL;
- if (!access_ok(VERIFY_READ, set, sizeof(kset))) {
- error = -EFAULT;
- goto out;
- }
-
- __copy_from_user(&kset, set, sizeof(set));
- if (error)
- goto out;
+ if (copy_from_user(&kset, set, sizeof(kset)))
+ return -EFAULT;
if (info && clear_user(info, sizeof(*info))) {
error = -EFAULT;
@@ -471,19 +489,21 @@ asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info,
}
if (tp) {
- if (!access_ok(VERIFY_READ, tp, sizeof(*tp)))
+ if (copy_from_user(&ktp, tp, sizeof(*tp)))
return -EFAULT;
- if (!tp->tv_sec && !tp->tv_nsec) {
- error = -EINVAL;
- goto out;
- }
- expire = timespec_to_jiffies(tp) + (tp->tv_sec||tp->tv_nsec);
+
+ if (!ktp.tv_sec && !ktp.tv_nsec)
+ return -EINVAL;
+
+ expire = timespec_to_jiffies(&ktp) +
+ (ktp.tv_sec || ktp.tv_nsec);
}
while(1) {
long tmp = 0;
- expire = schedule_timeout_interruptible(expire);
+ current->state = TASK_INTERRUPTIBLE;
+ expire = schedule_timeout(expire);
for (i=0; i<=4; i++)
tmp |= (current->pending.signal.sig[i] & kset.sig[i]);
@@ -500,15 +520,14 @@ asmlinkage int irix_sigpoll_sys(unsigned long *set, struct irix5_siginfo *info,
if (timeo)
return -EAGAIN;
- for(sig = 1; i <= 65 /* IRIX_NSIG */; sig++) {
+ for (sig = 1; sig <= 65 /* IRIX_NSIG */; sig++) {
if (sigismember (&kset, sig))
continue;
if (sigismember (&current->pending.signal, sig)) {
/* XXX need more than this... */
if (info)
- info->sig = sig;
- error = 0;
- goto out;
+ return copy_to_user(&info->sig, &sig, sizeof(sig)) ? -EFAULT : 0;
+ return 0;
}
}
@@ -534,8 +553,9 @@ extern int getrusage(struct task_struct *, int, struct rusage __user *);
#define W_MASK (W_EXITED | W_TRAPPED | W_STOPPED | W_CONT | W_NOHANG)
-asmlinkage int irix_waitsys(int type, int pid, struct irix5_siginfo *info,
- int options, struct rusage *ru)
+asmlinkage int irix_waitsys(int type, int pid,
+ struct irix5_siginfo __user *info, int options,
+ struct rusage __user *ru)
{
int flag, retval;
DECLARE_WAITQUEUE(wait, current);
@@ -543,28 +563,22 @@ asmlinkage int irix_waitsys(int type, int pid, struct irix5_siginfo *info,
struct task_struct *p;
struct list_head *_p;
- if (!info) {
- retval = -EINVAL;
- goto out;
- }
- if (!access_ok(VERIFY_WRITE, info, sizeof(*info))) {
- retval = -EFAULT;
- goto out;
- }
- if (ru) {
- if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru))) {
- retval = -EFAULT;
- goto out;
- }
- }
- if (options & ~(W_MASK)) {
- retval = -EINVAL;
- goto out;
- }
- if (type != IRIX_P_PID && type != IRIX_P_PGID && type != IRIX_P_ALL) {
- retval = -EINVAL;
- goto out;
- }
+ if (!info)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
+ return -EFAULT;
+
+ if (ru)
+ if (!access_ok(VERIFY_WRITE, ru, sizeof(*ru)))
+ return -EFAULT;
+
+ if (options & ~W_MASK)
+ return -EINVAL;
+
+ if (type != IRIX_P_PID && type != IRIX_P_PGID && type != IRIX_P_ALL)
+ return -EINVAL;
+
add_wait_queue(&current->signal->wait_chldexit, &wait);
repeat:
flag = 0;
@@ -595,18 +609,20 @@ repeat:
add_parent(p, p->parent);
write_unlock_irq(&tasklist_lock);
retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
- if (!retval && ru) {
- retval |= __put_user(SIGCHLD, &info->sig);
- retval |= __put_user(0, &info->code);
- retval |= __put_user(p->pid, &info->stuff.procinfo.pid);
- retval |= __put_user((p->exit_code >> 8) & 0xff,
- &info->stuff.procinfo.procdata.child.status);
- retval |= __put_user(p->utime, &info->stuff.procinfo.procdata.child.utime);
- retval |= __put_user(p->stime, &info->stuff.procinfo.procdata.child.stime);
- }
- if (!retval) {
- p->exit_code = 0;
- }
+ if (retval)
+ goto end_waitsys;
+
+ retval = __put_user(SIGCHLD, &info->sig);
+ retval |= __put_user(0, &info->code);
+ retval |= __put_user(p->pid, &info->stuff.procinfo.pid);
+ retval |= __put_user((p->exit_code >> 8) & 0xff,
+ &info->stuff.procinfo.procdata.child.status);
+ retval |= __put_user(p->utime, &info->stuff.procinfo.procdata.child.utime);
+ retval |= __put_user(p->stime, &info->stuff.procinfo.procdata.child.stime);
+ if (retval)
+ goto end_waitsys;
+
+ p->exit_code = 0;
goto end_waitsys;
case EXIT_ZOMBIE:
@@ -614,16 +630,18 @@ repeat:
current->signal->cstime += p->stime + p->signal->cstime;
if (ru != NULL)
getrusage(p, RUSAGE_BOTH, ru);
- __put_user(SIGCHLD, &info->sig);
- __put_user(1, &info->code); /* CLD_EXITED */
- __put_user(p->pid, &info->stuff.procinfo.pid);
- __put_user((p->exit_code >> 8) & 0xff,
+ retval = __put_user(SIGCHLD, &info->sig);
+ retval |= __put_user(1, &info->code); /* CLD_EXITED */
+ retval |= __put_user(p->pid, &info->stuff.procinfo.pid);
+ retval |= __put_user((p->exit_code >> 8) & 0xff,
&info->stuff.procinfo.procdata.child.status);
- __put_user(p->utime,
+ retval |= __put_user(p->utime,
&info->stuff.procinfo.procdata.child.utime);
- __put_user(p->stime,
+ retval |= __put_user(p->stime,
&info->stuff.procinfo.procdata.child.stime);
- retval = 0;
+ if (retval)
+ return retval;
+
if (p->real_parent != p->parent) {
write_lock_irq(&tasklist_lock);
remove_parent(p);
@@ -656,7 +674,6 @@ end_waitsys:
current->state = TASK_RUNNING;
remove_wait_queue(&current->signal->wait_chldexit, &wait);
-out:
return retval;
}
@@ -675,39 +692,39 @@ struct irix5_context {
asmlinkage int irix_getcontext(struct pt_regs *regs)
{
- int i, base = 0;
- struct irix5_context *ctx;
+ int error, i, base = 0;
+ struct irix5_context __user *ctx;
unsigned long flags;
if (regs->regs[2] == 1000)
base = 1;
- ctx = (struct irix5_context *) regs->regs[base + 4];
+ ctx = (struct irix5_context __user *) regs->regs[base + 4];
#ifdef DEBUG_SIG
printk("[%s:%d] irix_getcontext(%p)\n",
current->comm, current->pid, ctx);
#endif
- if (!access_ok(VERIFY_WRITE, ctx, sizeof(*ctx)))
+ if (!access_ok(VERIFY_WRITE, ctx, sizeof(*ctx)))
return -EFAULT;
- __put_user(current->thread.irix_oldctx, &ctx->link);
+ error = __put_user(current->thread.irix_oldctx, &ctx->link);
- __copy_to_user(&ctx->sigmask, &current->blocked, sizeof(irix_sigset_t));
+ error |= __copy_to_user(&ctx->sigmask, &current->blocked, sizeof(irix_sigset_t)) ? -EFAULT : 0;
/* XXX Do sigstack stuff someday... */
- __put_user(0, &ctx->stack.sp);
- __put_user(0, &ctx->stack.size);
- __put_user(0, &ctx->stack.flags);
+ error |= __put_user(0, &ctx->stack.sp);
+ error |= __put_user(0, &ctx->stack.size);
+ error |= __put_user(0, &ctx->stack.flags);
- __put_user(0, &ctx->weird_graphics_thing);
- __put_user(0, &ctx->regs[0]);
+ error |= __put_user(0, &ctx->weird_graphics_thing);
+ error |= __put_user(0, &ctx->regs[0]);
for (i = 1; i < 32; i++)
- __put_user(regs->regs[i], &ctx->regs[i]);
- __put_user(regs->lo, &ctx->regs[32]);
- __put_user(regs->hi, &ctx->regs[33]);
- __put_user(regs->cp0_cause, &ctx->regs[34]);
- __put_user(regs->cp0_epc, &ctx->regs[35]);
+ error |= __put_user(regs->regs[i], &ctx->regs[i]);
+ error |= __put_user(regs->lo, &ctx->regs[32]);
+ error |= __put_user(regs->hi, &ctx->regs[33]);
+ error |= __put_user(regs->cp0_cause, &ctx->regs[34]);
+ error |= __put_user(regs->cp0_epc, &ctx->regs[35]);
flags = 0x0f;
if (!used_math()) {
@@ -716,119 +733,124 @@ asmlinkage int irix_getcontext(struct pt_regs *regs)
/* XXX wheee... */
printk("Wheee, no code for saving IRIX FPU context yet.\n");
}
- __put_user(flags, &ctx->flags);
+ error |= __put_user(flags, &ctx->flags);
- return 0;
+ return error;
}
-asmlinkage unsigned long irix_setcontext(struct pt_regs *regs)
+asmlinkage void irix_setcontext(struct pt_regs *regs)
{
- int error, base = 0;
- struct irix5_context *ctx;
+ struct irix5_context __user *ctx;
+ int err, base = 0;
+ u32 flags;
- if(regs->regs[2] == 1000)
+ if (regs->regs[2] == 1000)
base = 1;
- ctx = (struct irix5_context *) regs->regs[base + 4];
+ ctx = (struct irix5_context __user *) regs->regs[base + 4];
#ifdef DEBUG_SIG
printk("[%s:%d] irix_setcontext(%p)\n",
current->comm, current->pid, ctx);
#endif
- if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))) {
- error = -EFAULT;
- goto out;
- }
+ if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx)))
+ goto segv_and_exit;
- if (ctx->flags & 0x02) {
+ err = __get_user(flags, &ctx->flags);
+ if (flags & 0x02) {
/* XXX sigstack garbage, todo... */
printk("Wheee, cannot do sigstack stuff in setcontext\n");
}
- if (ctx->flags & 0x04) {
+ if (flags & 0x04) {
int i;
/* XXX extra control block stuff... todo... */
- for(i = 1; i < 32; i++)
- regs->regs[i] = ctx->regs[i];
- regs->lo = ctx->regs[32];
- regs->hi = ctx->regs[33];
- regs->cp0_epc = ctx->regs[35];
+ for (i = 1; i < 32; i++)
+ err |= __get_user(regs->regs[i], &ctx->regs[i]);
+ err |= __get_user(regs->lo, &ctx->regs[32]);
+ err |= __get_user(regs->hi, &ctx->regs[33]);
+ err |= __get_user(regs->cp0_epc, &ctx->regs[35]);
}
- if (ctx->flags & 0x08) {
+ if (flags & 0x08)
/* XXX fpu context, blah... */
- printk("Wheee, cannot restore FPU context yet...\n");
- }
- current->thread.irix_oldctx = ctx->link;
- error = regs->regs[2];
+ printk(KERN_ERR "Wheee, cannot restore FPU context yet...\n");
-out:
- return error;
+ err |= __get_user(current->thread.irix_oldctx, &ctx->link);
+ if (err)
+ goto segv_and_exit;
+
+ /*
+ * Don't let your children do this ...
+ */
+ if (current_thread_info()->flags & TIF_SYSCALL_TRACE)
+ do_syscall_trace(regs, 1);
+ __asm__ __volatile__(
+ "move\t$29,%0\n\t"
+ "j\tsyscall_exit"
+ :/* no outputs */
+ :"r" (&regs));
+ /* Unreached */
+
+segv_and_exit:
+ force_sigsegv(SIGSEGV, current);
}
-struct irix_sigstack { unsigned long sp; int status; };
+struct irix_sigstack {
+ unsigned long sp;
+ int status;
+};
-asmlinkage int irix_sigstack(struct irix_sigstack *new, struct irix_sigstack *old)
+asmlinkage int irix_sigstack(struct irix_sigstack __user *new,
+ struct irix_sigstack __user *old)
{
- int error = -EFAULT;
-
#ifdef DEBUG_SIG
printk("[%s:%d] irix_sigstack(%p,%p)\n",
current->comm, current->pid, new, old);
#endif
- if(new) {
+ if (new) {
if (!access_ok(VERIFY_READ, new, sizeof(*new)))
- goto out;
+ return -EFAULT;
}
- if(old) {
+ if (old) {
if (!access_ok(VERIFY_WRITE, old, sizeof(*old)))
- goto out;
+ return -EFAULT;
}
- error = 0;
-out:
- return error;
+ return 0;
}
struct irix_sigaltstack { unsigned long sp; int size; int status; };
-asmlinkage int irix_sigaltstack(struct irix_sigaltstack *new,
- struct irix_sigaltstack *old)
+asmlinkage int irix_sigaltstack(struct irix_sigaltstack __user *new,
+ struct irix_sigaltstack __user *old)
{
- int error = -EFAULT;
-
#ifdef DEBUG_SIG
printk("[%s:%d] irix_sigaltstack(%p,%p)\n",
current->comm, current->pid, new, old);
#endif
- if (new) {
+ if (new)
if (!access_ok(VERIFY_READ, new, sizeof(*new)))
- goto out;
- }
+ return -EFAULT;
if (old) {
if (!access_ok(VERIFY_WRITE, old, sizeof(*old)))
- goto out;
+ return -EFAULT;
}
- error = 0;
-
-out:
- error = 0;
- return error;
+ return 0;
}
struct irix_procset {
int cmd, ltype, lid, rtype, rid;
};
-asmlinkage int irix_sigsendset(struct irix_procset *pset, int sig)
+asmlinkage int irix_sigsendset(struct irix_procset __user *pset, int sig)
{
if (!access_ok(VERIFY_READ, pset, sizeof(*pset)))
return -EFAULT;
-
#ifdef DEBUG_SIG
printk("[%s:%d] irix_sigsendset([%d,%d,%d,%d,%d],%d)\n",
current->comm, current->pid,
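
Most of the irixsig.c changes above apply one idiom: every __get_user()/__put_user() result is OR-ed into a single error accumulator and checked once, instead of being silently discarded. A minimal userspace sketch of that idiom, assuming a stand-in put_field() helper in place of the kernel's __put_user() (all names here are illustrative only):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for __put_user(): 0 on success, -EFAULT on a simulated fault. */
static int put_field(int value, int *slot, int simulate_fault)
{
	if (simulate_fault)
		return -EFAULT;
	*slot = value;
	return 0;
}

static int fill_frame(int *frame, int nfields, int bad_index)
{
	int error = 0;
	int i;

	/* Accumulate every failure instead of ignoring it ... */
	for (i = 0; i < nfields; i++)
		error |= put_field(i, &frame[i], i == bad_index);

	/* ... then bail out once, as setup_irix_frame() now does. */
	return error ? -EFAULT : 0;
}

int main(void)
{
	int frame[8];

	memset(frame, 0, sizeof(frame));
	printf("no fault:  %d\n", fill_frame(frame, 8, -1));	/* 0 */
	printf("one fault: %d\n", fill_frame(frame, 8, 3));	/* -EFAULT (-14) */
	return 0;
}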
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 43c00ac0b88d..3f653c7cfbf3 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -74,7 +74,7 @@ static void disable_msc_irq(unsigned int irq)
static void level_mask_and_ack_msc_irq(unsigned int irq)
{
mask_msc_irq(irq);
- if (!cpu_has_ei)
+ if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
}
@@ -84,7 +84,7 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
static void edge_mask_and_ack_msc_irq(unsigned int irq)
{
mask_msc_irq(irq);
- if (!cpu_has_ei)
+ if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0);
else {
u32 r;
@@ -129,25 +129,23 @@ msc_bind_eic_interrupt (unsigned int irq, unsigned int set)
#define shutdown_msc_irq disable_msc_irq
struct hw_interrupt_type msc_levelirq_type = {
- "SOC-it-Level",
- startup_msc_irq,
- shutdown_msc_irq,
- enable_msc_irq,
- disable_msc_irq,
- level_mask_and_ack_msc_irq,
- end_msc_irq,
- NULL
+ .typename = "SOC-it-Level",
+ .startup = startup_msc_irq,
+ .shutdown = shutdown_msc_irq,
+ .enable = enable_msc_irq,
+ .disable = disable_msc_irq,
+ .ack = level_mask_and_ack_msc_irq,
+ .end = end_msc_irq,
};
struct hw_interrupt_type msc_edgeirq_type = {
- "SOC-it-Edge",
- startup_msc_irq,
- shutdown_msc_irq,
- enable_msc_irq,
- disable_msc_irq,
- edge_mask_and_ack_msc_irq,
- end_msc_irq,
- NULL
+ .typename = "SOC-it-Edge",
+ .startup = startup_msc_irq,
+ .shutdown = shutdown_msc_irq,
+ .enable = enable_msc_irq,
+ .disable = disable_msc_irq,
+ .ack = edge_mask_and_ack_msc_irq,
+ .end = end_msc_irq,
};
@@ -168,14 +166,14 @@ void __init init_msc_irqs(unsigned int base, msc_irqmap_t *imp, int nirq)
switch (imp->im_type) {
case MSC01_IRQ_EDGE:
irq_desc[base+n].handler = &msc_edgeirq_type;
- if (cpu_has_ei)
+ if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
else
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
break;
case MSC01_IRQ_LEVEL:
irq_desc[base+n].handler = &msc_levelirq_type;
- if (cpu_has_ei)
+ if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
else
MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl);
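
The positional-to-designated initializer conversion above is repeated for every interrupt controller touched by this patch. Besides making the field assignments self-documenting, it means members that are no longer listed (such as the old trailing NULL for the affinity hook) are implicitly zero-initialized. A small userspace sketch of the same idea, using an illustrative struct rather than the kernel's hw_interrupt_type:

#include <stdio.h>

/* Illustrative stand-in for the hw_interrupt_type layout, not kernel code. */
struct demo_irq_type {
	const char *typename;
	unsigned int (*startup)(unsigned int irq);
	void (*shutdown)(unsigned int irq);
	void (*set_affinity)(unsigned int irq, unsigned long mask);
};

static unsigned int demo_startup(unsigned int irq) { (void)irq; return 0; }
static void demo_shutdown(unsigned int irq) { (void)irq; }

static struct demo_irq_type demo_type = {
	.typename = "demo",
	.startup  = demo_startup,
	.shutdown = demo_shutdown,
	/* .set_affinity omitted: implicitly NULL, like the dropped trailing NULL. */
};

int main(void)
{
	printf("%s: set_affinity=%p\n", demo_type.typename,
	       (void *)demo_type.set_affinity);
	return 0;
}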
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c
index 088bbbc869e6..0ac067f45cf5 100644
--- a/arch/mips/kernel/irq-mv6434x.c
+++ b/arch/mips/kernel/irq-mv6434x.c
@@ -135,14 +135,13 @@ void ll_mv64340_irq(struct pt_regs *regs)
#define shutdown_mv64340_irq disable_mv64340_irq
struct hw_interrupt_type mv64340_irq_type = {
- "MV-64340",
- startup_mv64340_irq,
- shutdown_mv64340_irq,
- enable_mv64340_irq,
- disable_mv64340_irq,
- mask_and_ack_mv64340_irq,
- end_mv64340_irq,
- NULL
+ .typename = "MV-64340",
+ .startup = startup_mv64340_irq,
+ .shutdown = shutdown_mv64340_irq,
+ .enable = enable_mv64340_irq,
+ .disable = disable_mv64340_irq,
+ .ack = mask_and_ack_mv64340_irq,
+ .end = end_mv64340_irq,
};
void __init mv64340_irq_init(unsigned int base)
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index f5d779fd0355..0b130c5ac5d9 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -72,13 +72,13 @@ static void rm7k_cpu_irq_end(unsigned int irq)
}
static hw_irq_controller rm7k_irq_controller = {
- "RM7000",
- rm7k_cpu_irq_startup,
- rm7k_cpu_irq_shutdown,
- rm7k_cpu_irq_enable,
- rm7k_cpu_irq_disable,
- rm7k_cpu_irq_ack,
- rm7k_cpu_irq_end,
+ .typename = "RM7000",
+ .startup = rm7k_cpu_irq_startup,
+ .shutdown = rm7k_cpu_irq_shutdown,
+ .enable = rm7k_cpu_irq_enable,
+ .disable = rm7k_cpu_irq_disable,
+ .ack = rm7k_cpu_irq_ack,
+ .end = rm7k_cpu_irq_end,
};
void __init rm7k_cpu_irq_init(int base)
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index bdd130296256..9b5f20c32acb 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -106,23 +106,23 @@ static void rm9k_cpu_irq_end(unsigned int irq)
}
static hw_irq_controller rm9k_irq_controller = {
- "RM9000",
- rm9k_cpu_irq_startup,
- rm9k_cpu_irq_shutdown,
- rm9k_cpu_irq_enable,
- rm9k_cpu_irq_disable,
- rm9k_cpu_irq_ack,
- rm9k_cpu_irq_end,
+ .typename = "RM9000",
+ .startup = rm9k_cpu_irq_startup,
+ .shutdown = rm9k_cpu_irq_shutdown,
+ .enable = rm9k_cpu_irq_enable,
+ .disable = rm9k_cpu_irq_disable,
+ .ack = rm9k_cpu_irq_ack,
+ .end = rm9k_cpu_irq_end,
};
static hw_irq_controller rm9k_perfcounter_irq = {
- "RM9000",
- rm9k_perfcounter_irq_startup,
- rm9k_perfcounter_irq_shutdown,
- rm9k_cpu_irq_enable,
- rm9k_cpu_irq_disable,
- rm9k_cpu_irq_ack,
- rm9k_cpu_irq_end,
+ .typename = "RM9000",
+ .startup = rm9k_perfcounter_irq_startup,
+ .shutdown = rm9k_perfcounter_irq_shutdown,
+ .enable = rm9k_cpu_irq_enable,
+ .disable = rm9k_cpu_irq_disable,
+ .ack = rm9k_cpu_irq_ack,
+ .end = rm9k_cpu_irq_end,
};
unsigned int rm9000_perfcount_irq;
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 2b936cf1ef70..5db67e31ec1a 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -3,6 +3,8 @@
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
*
* Copyright (C) 2001 Ralf Baechle
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Author: Maciej W. Rozycki <macro@mips.com>
*
* This file define the irq handler for MIPS CPU interrupts.
*
@@ -31,19 +33,21 @@
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
#include <asm/system.h>
static int mips_cpu_irq_base;
static inline void unmask_mips_irq(unsigned int irq)
{
- clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
set_c0_status(0x100 << (irq - mips_cpu_irq_base));
+ irq_enable_hazard();
}
static inline void mask_mips_irq(unsigned int irq)
{
clear_c0_status(0x100 << (irq - mips_cpu_irq_base));
+ irq_disable_hazard();
}
static inline void mips_cpu_irq_enable(unsigned int irq)
@@ -52,6 +56,7 @@ static inline void mips_cpu_irq_enable(unsigned int irq)
local_irq_save(flags);
unmask_mips_irq(irq);
+ back_to_back_c0_hazard();
local_irq_restore(flags);
}
@@ -61,6 +66,7 @@ static void mips_cpu_irq_disable(unsigned int irq)
local_irq_save(flags);
mask_mips_irq(irq);
+ back_to_back_c0_hazard();
local_irq_restore(flags);
}
@@ -71,7 +77,7 @@ static unsigned int mips_cpu_irq_startup(unsigned int irq)
return 0;
}
-#define mips_cpu_irq_shutdown mips_cpu_irq_disable
+#define mips_cpu_irq_shutdown mips_cpu_irq_disable
/*
* While we ack the interrupt interrupts are disabled and thus we don't need
@@ -79,9 +85,6 @@ static unsigned int mips_cpu_irq_startup(unsigned int irq)
*/
static void mips_cpu_irq_ack(unsigned int irq)
{
- /* Only necessary for soft interrupts */
- clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
-
mask_mips_irq(irq);
}
@@ -92,22 +95,82 @@ static void mips_cpu_irq_end(unsigned int irq)
}
static hw_irq_controller mips_cpu_irq_controller = {
- "MIPS",
- mips_cpu_irq_startup,
- mips_cpu_irq_shutdown,
- mips_cpu_irq_enable,
- mips_cpu_irq_disable,
- mips_cpu_irq_ack,
- mips_cpu_irq_end,
- NULL /* no affinity stuff for UP */
+ .typename = "MIPS",
+ .startup = mips_cpu_irq_startup,
+ .shutdown = mips_cpu_irq_shutdown,
+ .enable = mips_cpu_irq_enable,
+ .disable = mips_cpu_irq_disable,
+ .ack = mips_cpu_irq_ack,
+ .end = mips_cpu_irq_end,
};
+/*
+ * Basically the same as above but taking care of all the MT stuff
+ */
+
+#define unmask_mips_mt_irq unmask_mips_irq
+#define mask_mips_mt_irq mask_mips_irq
+#define mips_mt_cpu_irq_enable mips_cpu_irq_enable
+#define mips_mt_cpu_irq_disable mips_cpu_irq_disable
+
+static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
+{
+ unsigned int vpflags = dvpe();
+
+ clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
+ evpe(vpflags);
+ mips_mt_cpu_irq_enable(irq);
+
+ return 0;
+}
+
+#define mips_mt_cpu_irq_shutdown mips_mt_cpu_irq_disable
+
+/*
+ * While we ack the interrupt interrupts are disabled and thus we don't need
+ * to deal with concurrency issues. Same for mips_cpu_irq_end.
+ */
+static void mips_mt_cpu_irq_ack(unsigned int irq)
+{
+ unsigned int vpflags = dvpe();
+ clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
+ evpe(vpflags);
+ mask_mips_mt_irq(irq);
+}
+
+#define mips_mt_cpu_irq_end mips_cpu_irq_end
+
+static hw_irq_controller mips_mt_cpu_irq_controller = {
+ .typename = "MIPS",
+ .startup = mips_mt_cpu_irq_startup,
+ .shutdown = mips_mt_cpu_irq_shutdown,
+ .enable = mips_mt_cpu_irq_enable,
+ .disable = mips_mt_cpu_irq_disable,
+ .ack = mips_mt_cpu_irq_ack,
+ .end = mips_mt_cpu_irq_end,
+};
void __init mips_cpu_irq_init(int irq_base)
{
int i;
- for (i = irq_base; i < irq_base + 8; i++) {
+ /* Mask interrupts. */
+ clear_c0_status(ST0_IM);
+ clear_c0_cause(CAUSEF_IP);
+
+ /*
+ * Only MT is using the software interrupts currently, so we just
+ * leave them uninitialized for other processors.
+ */
+ if (cpu_has_mipsmt)
+ for (i = irq_base; i < irq_base + 2; i++) {
+ irq_desc[i].status = IRQ_DISABLED;
+ irq_desc[i].action = NULL;
+ irq_desc[i].depth = 1;
+ irq_desc[i].handler = &mips_mt_cpu_irq_controller;
+ }
+
+ for (i = irq_base + 2; i < irq_base + 8; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index ece4564919d8..330cf84d21fe 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -215,81 +215,32 @@ sys32_readdir(unsigned int fd, void * dirent32, unsigned int count)
return(n);
}
-struct rusage32 {
- struct compat_timeval ru_utime;
- struct compat_timeval ru_stime;
- int ru_maxrss;
- int ru_ixrss;
- int ru_idrss;
- int ru_isrss;
- int ru_minflt;
- int ru_majflt;
- int ru_nswap;
- int ru_inblock;
- int ru_oublock;
- int ru_msgsnd;
- int ru_msgrcv;
- int ru_nsignals;
- int ru_nvcsw;
- int ru_nivcsw;
-};
-
-static int
-put_rusage (struct rusage32 *ru, struct rusage *r)
+asmlinkage int
+sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options)
{
- int err;
-
- if (!access_ok(VERIFY_WRITE, ru, sizeof *ru))
- return -EFAULT;
-
- err = __put_user (r->ru_utime.tv_sec, &ru->ru_utime.tv_sec);
- err |= __put_user (r->ru_utime.tv_usec, &ru->ru_utime.tv_usec);
- err |= __put_user (r->ru_stime.tv_sec, &ru->ru_stime.tv_sec);
- err |= __put_user (r->ru_stime.tv_usec, &ru->ru_stime.tv_usec);
- err |= __put_user (r->ru_maxrss, &ru->ru_maxrss);
- err |= __put_user (r->ru_ixrss, &ru->ru_ixrss);
- err |= __put_user (r->ru_idrss, &ru->ru_idrss);
- err |= __put_user (r->ru_isrss, &ru->ru_isrss);
- err |= __put_user (r->ru_minflt, &ru->ru_minflt);
- err |= __put_user (r->ru_majflt, &ru->ru_majflt);
- err |= __put_user (r->ru_nswap, &ru->ru_nswap);
- err |= __put_user (r->ru_inblock, &ru->ru_inblock);
- err |= __put_user (r->ru_oublock, &ru->ru_oublock);
- err |= __put_user (r->ru_msgsnd, &ru->ru_msgsnd);
- err |= __put_user (r->ru_msgrcv, &ru->ru_msgrcv);
- err |= __put_user (r->ru_nsignals, &ru->ru_nsignals);
- err |= __put_user (r->ru_nvcsw, &ru->ru_nvcsw);
- err |= __put_user (r->ru_nivcsw, &ru->ru_nivcsw);
-
- return err;
+ return compat_sys_wait4(pid, stat_addr, options, NULL);
}
-asmlinkage int
-sys32_wait4(compat_pid_t pid, unsigned int * stat_addr, int options,
- struct rusage32 * ru)
+asmlinkage long
+sysn32_waitid(int which, compat_pid_t pid,
+ siginfo_t __user *uinfo, int options,
+ struct compat_rusage __user *uru)
{
- if (!ru)
- return sys_wait4(pid, stat_addr, options, NULL);
- else {
- struct rusage r;
- int ret;
- unsigned int status;
- mm_segment_t old_fs = get_fs();
+ struct rusage ru;
+ long ret;
+ mm_segment_t old_fs = get_fs();
- set_fs(KERNEL_DS);
- ret = sys_wait4(pid, stat_addr ? &status : NULL, options, &r);
- set_fs(old_fs);
- if (put_rusage (ru, &r)) return -EFAULT;
- if (stat_addr && put_user (status, stat_addr))
- return -EFAULT;
+ set_fs (KERNEL_DS);
+ ret = sys_waitid(which, pid, uinfo, options,
+ uru ? (struct rusage __user *) &ru : NULL);
+ set_fs (old_fs);
+
+ if (ret < 0 || uinfo->si_signo == 0)
return ret;
- }
-}
-asmlinkage int
-sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, int options)
-{
- return sys32_wait4(pid, stat_addr, options, NULL);
+ if (uru)
+ ret = put_compat_rusage(&ru, uru);
+ return ret;
}
struct sysinfo32 {
@@ -1467,3 +1418,80 @@ asmlinkage long sys32_socketcall(int call, unsigned int *args32)
}
return err;
}
+
+struct sigevent32 {
+ u32 sigev_value;
+ u32 sigev_signo;
+ u32 sigev_notify;
+ u32 payload[(64 / 4) - 3];
+};
+
+extern asmlinkage long
+sys_timer_create(clockid_t which_clock,
+ struct sigevent __user *timer_event_spec,
+ timer_t __user * created_timer_id);
+
+long
+sys32_timer_create(u32 clock, struct sigevent32 __user *se32, timer_t __user *timer_id)
+{
+ struct sigevent __user *p = NULL;
+ if (se32) {
+ struct sigevent se;
+ p = compat_alloc_user_space(sizeof(struct sigevent));
+ memset(&se, 0, sizeof(struct sigevent));
+ if (get_user(se.sigev_value.sival_int, &se32->sigev_value) ||
+ __get_user(se.sigev_signo, &se32->sigev_signo) ||
+ __get_user(se.sigev_notify, &se32->sigev_notify) ||
+ __copy_from_user(&se._sigev_un._pad, &se32->payload,
+ sizeof(se32->payload)) ||
+ copy_to_user(p, &se, sizeof(se)))
+ return -EFAULT;
+ }
+ return sys_timer_create(clock, p, timer_id);
+}
+
+asmlinkage long
+sysn32_rt_sigtimedwait(const sigset_t __user *uthese,
+ siginfo_t __user *uinfo,
+ const struct compat_timespec __user *uts32,
+ size_t sigsetsize)
+{
+ struct timespec __user *uts = NULL;
+
+ if (uts32) {
+ struct timespec ts;
+ uts = compat_alloc_user_space(sizeof(struct timespec));
+ if (get_user(ts.tv_sec, &uts32->tv_sec) ||
+ get_user(ts.tv_nsec, &uts32->tv_nsec) ||
+ copy_to_user (uts, &ts, sizeof (ts)))
+ return -EFAULT;
+ }
+ return sys_rt_sigtimedwait(uthese, uinfo, uts, sigsetsize);
+}
+
+save_static_function(sys32_clone);
+__attribute_used__ noinline static int
+_sys32_clone(nabi_no_regargs struct pt_regs regs)
+{
+ unsigned long clone_flags;
+ unsigned long newsp;
+ int __user *parent_tidptr, *child_tidptr;
+
+ clone_flags = regs.regs[4];
+ newsp = regs.regs[5];
+ if (!newsp)
+ newsp = regs.regs[29];
+ parent_tidptr = (int __user *) regs.regs[6];
+
+ /* Use __dummy4 instead of getting it off the stack, so that
+ syscall() works. */
+ child_tidptr = (int __user *) __dummy4;
+ return do_fork(clone_flags, newsp, &regs, 0,
+ parent_tidptr, child_tidptr);
+}
+
+extern asmlinkage void sys_set_thread_area(u32 addr);
+asmlinkage void sys32_set_thread_area(u32 addr)
+{
+ sys_set_thread_area(AA(addr));
+}
diff --git a/arch/mips/kernel/module-elf32.c b/arch/mips/kernel/module-elf32.c
deleted file mode 100644
index ffd216d6d6dc..000000000000
--- a/arch/mips/kernel/module-elf32.c
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) 2001 Rusty Russell.
- * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
- */
-
-#undef DEBUG
-
-#include <linux/moduleloader.h>
-#include <linux/elf.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-
-struct mips_hi16 {
- struct mips_hi16 *next;
- Elf32_Addr *addr;
- Elf32_Addr value;
-};
-
-static struct mips_hi16 *mips_hi16_list;
-
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
- /* FIXME: If module_region == mod->init_region, trim exception
- table entries. */
-}
-
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
-static int apply_r_mips_none(struct module *me, uint32_t *location,
- Elf32_Addr v)
-{
- return 0;
-}
-
-static int apply_r_mips_32(struct module *me, uint32_t *location,
- Elf32_Addr v)
-{
- *location += v;
-
- return 0;
-}
-
-static int apply_r_mips_26(struct module *me, uint32_t *location,
- Elf32_Addr v)
-{
- if (v % 4) {
- printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
- return -ENOEXEC;
- }
-
- if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
- printk(KERN_ERR
- "module %s: relocation overflow\n",
- me->name);
- return -ENOEXEC;
- }
-
- *location = (*location & ~0x03ffffff) |
- ((*location + (v >> 2)) & 0x03ffffff);
-
- return 0;
-}
-
-static int apply_r_mips_hi16(struct module *me, uint32_t *location,
- Elf32_Addr v)
-{
- struct mips_hi16 *n;
-
- /*
- * We cannot relocate this one now because we don't know the value of
- * the carry we need to add. Save the information, and let LO16 do the
- * actual relocation.
- */
- n = kmalloc(sizeof *n, GFP_KERNEL);
- if (!n)
- return -ENOMEM;
-
- n->addr = location;
- n->value = v;
- n->next = mips_hi16_list;
- mips_hi16_list = n;
-
- return 0;
-}
-
-static int apply_r_mips_lo16(struct module *me, uint32_t *location,
- Elf32_Addr v)
-{
- unsigned long insnlo = *location;
- Elf32_Addr val, vallo;
-
- /* Sign extend the addend we extract from the lo insn. */
- vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
-
- if (mips_hi16_list != NULL) {
- struct mips_hi16 *l;
-
- l = mips_hi16_list;
- while (l != NULL) {
- struct mips_hi16 *next;
- unsigned long insn;
-
- /*
- * The value for the HI16 had best be the same.
- */
- if (v != l->value)
- goto out_danger;
-
- /*
- * Do the HI16 relocation. Note that we actually don't
- * need to know anything about the LO16 itself, except
- * where to find the low 16 bits of the addend needed
- * by the LO16.
- */
- insn = *l->addr;
- val = ((insn & 0xffff) << 16) + vallo;
- val += v;
-
- /*
- * Account for the sign extension that will happen in
- * the low bits.
- */
- val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
-
- insn = (insn & ~0xffff) | val;
- *l->addr = insn;
-
- next = l->next;
- kfree(l);
- l = next;
- }
-
- mips_hi16_list = NULL;
- }
-
- /*
- * Ok, we're done with the HI16 relocs. Now deal with the LO16.
- */
- val = v + vallo;
- insnlo = (insnlo & ~0xffff) | (val & 0xffff);
- *location = insnlo;
-
- return 0;
-
-out_danger:
- printk(KERN_ERR "module %s: dangerous " "relocation\n", me->name);
-
- return -ENOEXEC;
-}
-
-static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
- Elf32_Addr v) = {
- [R_MIPS_NONE] = apply_r_mips_none,
- [R_MIPS_32] = apply_r_mips_32,
- [R_MIPS_26] = apply_r_mips_26,
- [R_MIPS_HI16] = apply_r_mips_hi16,
- [R_MIPS_LO16] = apply_r_mips_lo16
-};
-
-int apply_relocate(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr;
- Elf32_Sym *sym;
- uint32_t *location;
- unsigned int i;
- Elf32_Addr v;
- int res;
-
- pr_debug("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
-
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
- Elf32_Word r_info = rel[i].r_info;
-
- /* This is where to make the change */
- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
- + rel[i].r_offset;
- /* This is the symbol it is referring to */
- sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
- + ELF32_R_SYM(r_info);
- if (!sym->st_value) {
- printk(KERN_WARNING "%s: Unknown symbol %s\n",
- me->name, strtab + sym->st_name);
- return -ENOENT;
- }
-
- v = sym->st_value;
-
- res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
- if (res)
- return res;
- }
-
- return 0;
-}
-
-int apply_relocate_add(Elf32_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- /*
- * Current binutils always generate .rela relocations. Keep smiling
- * if it's empty, abort otherwise.
- */
- if (!sechdrs[relsec].sh_size)
- return 0;
-
- printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
- me->name);
- return -ENOEXEC;
-}
diff --git a/arch/mips/kernel/module-elf64.c b/arch/mips/kernel/module-elf64.c
deleted file mode 100644
index e804792ee1ee..000000000000
--- a/arch/mips/kernel/module-elf64.c
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) 2001 Rusty Russell.
- * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
- */
-
-#undef DEBUG
-
-#include <linux/moduleloader.h>
-#include <linux/elf.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-
-struct mips_hi16 {
- struct mips_hi16 *next;
- Elf32_Addr *addr;
- Elf64_Addr value;
-};
-
-static struct mips_hi16 *mips_hi16_list;
-
-void *module_alloc(unsigned long size)
-{
- if (size == 0)
- return NULL;
- return vmalloc(size);
-}
-
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
-{
- vfree(module_region);
- /* FIXME: If module_region == mod->init_region, trim exception
- table entries. */
-}
-
-int module_frob_arch_sections(Elf_Ehdr *hdr,
- Elf_Shdr *sechdrs,
- char *secstrings,
- struct module *mod)
-{
- return 0;
-}
-
-int apply_relocate(Elf64_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- /*
- * We don't want to deal with REL relocations - RELA is so much saner.
- */
- if (!sechdrs[relsec].sh_size)
- return 0;
-
- printk(KERN_ERR "module %s: REL relocation unsupported\n",
- me->name);
- return -ENOEXEC;
-}
-
-static int apply_r_mips_none(struct module *me, uint32_t *location,
- Elf64_Addr v)
-{
- return 0;
-}
-
-static int apply_r_mips_32(struct module *me, uint32_t *location,
- Elf64_Addr v)
-{
- *location = v;
-
- return 0;
-}
-
-static int apply_r_mips_26(struct module *me, uint32_t *location,
- Elf64_Addr v)
-{
- if (v % 4) {
- printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
- return -ENOEXEC;
- }
-
- if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
- printk(KERN_ERR
- "module %s: relocation overflow\n",
- me->name);
- return -ENOEXEC;
- }
-
- *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
-
- return 0;
-}
-
-static int apply_r_mips_hi16(struct module *me, uint32_t *location,
- Elf64_Addr v)
-{
- struct mips_hi16 *n;
-
- /*
- * We cannot relocate this one now because we don't know the value of
- * the carry we need to add. Save the information, and let LO16 do the
- * actual relocation.
- */
- n = kmalloc(sizeof *n, GFP_KERNEL);
- if (!n)
- return -ENOMEM;
-
- n->addr = location;
- n->value = v;
- n->next = mips_hi16_list;
- mips_hi16_list = n;
-
- return 0;
-}
-
-static int apply_r_mips_lo16(struct module *me, uint32_t *location,
- Elf64_Addr v)
-{
- unsigned long insnlo = *location;
- Elf32_Addr val, vallo;
-
- /* Sign extend the addend we extract from the lo insn. */
- vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
-
- if (mips_hi16_list != NULL) {
- struct mips_hi16 *l;
-
- l = mips_hi16_list;
- while (l != NULL) {
- struct mips_hi16 *next;
- unsigned long insn;
-
- /*
- * The value for the HI16 had best be the same.
- */
- if (v != l->value)
- goto out_danger;
-
- /*
- * Do the HI16 relocation. Note that we actually don't
- * need to know anything about the LO16 itself, except
- * where to find the low 16 bits of the addend needed
- * by the LO16.
- */
- insn = *l->addr;
- val = ((insn & 0xffff) << 16) + vallo;
- val += v;
-
- /*
- * Account for the sign extension that will happen in
- * the low bits.
- */
- val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
-
- insn = (insn & ~0xffff) | val;
- *l->addr = insn;
-
- next = l->next;
- kfree(l);
- l = next;
- }
-
- mips_hi16_list = NULL;
- }
-
- /*
- * Ok, we're done with the HI16 relocs. Now deal with the LO16.
- */
- insnlo = (insnlo & ~0xffff) | (v & 0xffff);
- *location = insnlo;
-
- return 0;
-
-out_danger:
- printk(KERN_ERR "module %s: dangerous " "relocation\n", me->name);
-
- return -ENOEXEC;
-}
-
-static int apply_r_mips_64(struct module *me, uint32_t *location,
- Elf64_Addr v)
-{
- *(uint64_t *) location = v;
-
- return 0;
-}
-
-
-static int apply_r_mips_higher(struct module *me, uint32_t *location,
- Elf64_Addr v)
-{
- *location = (*location & 0xffff0000) |
- ((((long long) v + 0x80008000LL) >> 32) & 0xffff);
-
- return 0;
-}
-
-static int apply_r_mips_highest(struct module *me, uint32_t *location,
- Elf64_Addr v)
-{
- *location = (*location & 0xffff0000) |
- ((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
-
- return 0;
-}
-
-static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
- Elf64_Addr v) = {
- [R_MIPS_NONE] = apply_r_mips_none,
- [R_MIPS_32] = apply_r_mips_32,
- [R_MIPS_26] = apply_r_mips_26,
- [R_MIPS_HI16] = apply_r_mips_hi16,
- [R_MIPS_LO16] = apply_r_mips_lo16,
- [R_MIPS_64] = apply_r_mips_64,
- [R_MIPS_HIGHER] = apply_r_mips_higher,
- [R_MIPS_HIGHEST] = apply_r_mips_highest
-};
-
-int apply_relocate_add(Elf64_Shdr *sechdrs,
- const char *strtab,
- unsigned int symindex,
- unsigned int relsec,
- struct module *me)
-{
- Elf64_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
- Elf64_Sym *sym;
- uint32_t *location;
- unsigned int i;
- Elf64_Addr v;
- int res;
-
- pr_debug("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
-
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
- /* This is where to make the change */
- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
- + rel[i].r_offset;
- /* This is the symbol it is referring to */
- sym = (Elf64_Sym *)sechdrs[symindex].sh_addr + rel[i].r_sym;
- if (!sym->st_value) {
- printk(KERN_WARNING "%s: Unknown symbol %s\n",
- me->name, strtab + sym->st_name);
- return -ENOENT;
- }
-
- v = sym->st_value;
-
- res = reloc_handlers[rel[i].r_type](me, location, v);
- if (res)
- return res;
- }
-
- return 0;
-}
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 458af3c7a639..e54a7f442f8a 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -1,9 +1,345 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (C) 2001 Rusty Russell.
+ * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2005 Thiemo Seufer
+ */
+
+#undef DEBUG
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
+struct mips_hi16 {
+ struct mips_hi16 *next;
+ Elf_Addr *addr;
+ Elf_Addr value;
+};
+
+static struct mips_hi16 *mips_hi16_list;
+
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
+void *module_alloc(unsigned long size)
+{
+ if (size == 0)
+ return NULL;
+ return vmalloc(size);
+}
+
+/* Free memory returned from module_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+ vfree(module_region);
+ /* FIXME: If module_region == mod->init_region, trim exception
+ table entries. */
+}
+
+int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
+{
+ return 0;
+}
+
+static int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
+{
+ return 0;
+}
+
+static int apply_r_mips_32_rel(struct module *me, u32 *location, Elf_Addr v)
+{
+ *location += v;
+
+ return 0;
+}
+
+static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ *location = v;
+
+ return 0;
+}
+
+static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
+{
+ if (v % 4) {
+ printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
+ return -ENOEXEC;
+ }
+
+ if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+ printk(KERN_ERR
+ "module %s: relocation overflow\n",
+ me->name);
+ return -ENOEXEC;
+ }
+
+ *location = (*location & ~0x03ffffff) |
+ ((*location + (v >> 2)) & 0x03ffffff);
+
+ return 0;
+}
+
+static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ if (v % 4) {
+ printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
+ return -ENOEXEC;
+ }
+
+ if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+ printk(KERN_ERR
+ "module %s: relocation overflow\n",
+ me->name);
+ return -ENOEXEC;
+ }
+
+ *location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
+
+ return 0;
+}
+
+static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
+{
+ struct mips_hi16 *n;
+
+ /*
+ * We cannot relocate this one now because we don't know the value of
+ * the carry we need to add. Save the information, and let LO16 do the
+ * actual relocation.
+ */
+ n = kmalloc(sizeof *n, GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+
+ n->addr = (Elf_Addr *)location;
+ n->value = v;
+ n->next = mips_hi16_list;
+ mips_hi16_list = n;
+
+ return 0;
+}
+
+static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ *location = (*location & 0xffff0000) |
+ ((((long long) v + 0x8000LL) >> 16) & 0xffff);
+
+ return 0;
+}
+
+static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
+{
+ unsigned long insnlo = *location;
+ Elf_Addr val, vallo;
+
+ /* Sign extend the addend we extract from the lo insn. */
+ vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
+
+ if (mips_hi16_list != NULL) {
+ struct mips_hi16 *l;
+
+ l = mips_hi16_list;
+ while (l != NULL) {
+ struct mips_hi16 *next;
+ unsigned long insn;
+
+ /*
+ * The value for the HI16 had best be the same.
+ */
+ if (v != l->value)
+ goto out_danger;
+
+ /*
+ * Do the HI16 relocation. Note that we actually don't
+ * need to know anything about the LO16 itself, except
+ * where to find the low 16 bits of the addend needed
+ * by the LO16.
+ */
+ insn = *l->addr;
+ val = ((insn & 0xffff) << 16) + vallo;
+ val += v;
+
+ /*
+ * Account for the sign extension that will happen in
+ * the low bits.
+ */
+ val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
+
+ insn = (insn & ~0xffff) | val;
+ *l->addr = insn;
+
+ next = l->next;
+ kfree(l);
+ l = next;
+ }
+
+ mips_hi16_list = NULL;
+ }
+
+ /*
+ * Ok, we're done with the HI16 relocs. Now deal with the LO16.
+ */
+ val = v + vallo;
+ insnlo = (insnlo & ~0xffff) | (val & 0xffff);
+ *location = insnlo;
+
+ return 0;
+
+out_danger:
+ printk(KERN_ERR "module %s: dangerous " "relocation\n", me->name);
+
+ return -ENOEXEC;
+}
+
+static int apply_r_mips_lo16_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ *location = (*location & 0xffff0000) | (v & 0xffff);
+
+ return 0;
+}
+
+static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ *(Elf_Addr *)location = v;
+
+ return 0;
+}
+
+static int apply_r_mips_higher_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ *location = (*location & 0xffff0000) |
+ ((((long long) v + 0x80008000LL) >> 32) & 0xffff);
+
+ return 0;
+}
+
+static int apply_r_mips_highest_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ *location = (*location & 0xffff0000) |
+ ((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
+
+ return 0;
+}
+
+static int (*reloc_handlers_rel[]) (struct module *me, u32 *location,
+ Elf_Addr v) = {
+ [R_MIPS_NONE] = apply_r_mips_none,
+ [R_MIPS_32] = apply_r_mips_32_rel,
+ [R_MIPS_26] = apply_r_mips_26_rel,
+ [R_MIPS_HI16] = apply_r_mips_hi16_rel,
+ [R_MIPS_LO16] = apply_r_mips_lo16_rel
+};
+
+static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
+ Elf_Addr v) = {
+ [R_MIPS_NONE] = apply_r_mips_none,
+ [R_MIPS_32] = apply_r_mips_32_rela,
+ [R_MIPS_26] = apply_r_mips_26_rela,
+ [R_MIPS_HI16] = apply_r_mips_hi16_rela,
+ [R_MIPS_LO16] = apply_r_mips_lo16_rela,
+ [R_MIPS_64] = apply_r_mips_64_rela,
+ [R_MIPS_HIGHER] = apply_r_mips_higher_rela,
+ [R_MIPS_HIGHEST] = apply_r_mips_highest_rela
+};
+
+int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr;
+ Elf_Sym *sym;
+ u32 *location;
+ unsigned int i;
+ Elf_Addr v;
+ int res;
+
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ /* This is the symbol it is referring to */
+ sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ + ELF_MIPS_R_SYM(rel[i]);
+ if (!sym->st_value) {
+ printk(KERN_WARNING "%s: Unknown symbol %s\n",
+ me->name, strtab + sym->st_name);
+ return -ENOENT;
+ }
+
+ v = sym->st_value;
+
+ res = reloc_handlers_rel[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+ Elf_Sym *sym;
+ u32 *location;
+ unsigned int i;
+ Elf_Addr v;
+ int res;
+
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ /* This is the symbol it is referring to */
+ sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ + ELF_MIPS_R_SYM(rel[i]);
+ if (!sym->st_value) {
+ printk(KERN_WARNING "%s: Unknown symbol %s\n",
+ me->name, strtab + sym->st_name);
+ return -ENOENT;
+ }
+
+ v = sym->st_value + rel[i].r_addend;
+
+ res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_dbetables(unsigned long addr)
{
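
The HI16 handler above has to defer its fixup because the carry it needs depends on
the paired LO16 addend: the CPU sign-extends the %lo half, so %hi must be biased by
0x8000 for the pair to reconstruct the full address. A minimal user-space sketch of
that carry arithmetic (the address is illustrative, not taken from the patch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr = 0x12348000;		/* hypothetical relocation target */

	/* %lo is sign-extended by addiu at run time, so %hi must carry. */
	int32_t  lo = (int16_t)(addr & 0xffff);
	uint32_t hi = (addr + 0x8000) >> 16;	/* same bias as apply_r_mips_hi16_rela() */

	/* What the patched lui/addiu pair computes when executed. */
	uint32_t rebuilt = (hi << 16) + lo;

	assert(rebuilt == addr);
	printf("addr=%#x hi=%#x lo=%d rebuilt=%#x\n", addr, hi, lo, rebuilt);
	return 0;
}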
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 0f159f30e894..86fe15b273cd 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -2,7 +2,8 @@
* linux/arch/mips/kernel/proc.c
*
* Copyright (C) 1995, 1996, 2001 Ralf Baechle
- * Copyright (C) 2001 MIPS Technologies, Inc.
+ * Copyright (C) 2001, 2004 MIPS Technologies, Inc.
+ * Copyright (C) 2004 Maciej W. Rozycki
*/
#include <linux/config.h>
#include <linux/delay.h>
@@ -19,63 +20,69 @@
unsigned int vced_count, vcei_count;
static const char *cpu_name[] = {
- [CPU_UNKNOWN] "unknown",
- [CPU_R2000] "R2000",
- [CPU_R3000] "R3000",
- [CPU_R3000A] "R3000A",
- [CPU_R3041] "R3041",
- [CPU_R3051] "R3051",
- [CPU_R3052] "R3052",
- [CPU_R3081] "R3081",
- [CPU_R3081E] "R3081E",
- [CPU_R4000PC] "R4000PC",
- [CPU_R4000SC] "R4000SC",
- [CPU_R4000MC] "R4000MC",
- [CPU_R4200] "R4200",
- [CPU_R4400PC] "R4400PC",
- [CPU_R4400SC] "R4400SC",
- [CPU_R4400MC] "R4400MC",
- [CPU_R4600] "R4600",
- [CPU_R6000] "R6000",
- [CPU_R6000A] "R6000A",
- [CPU_R8000] "R8000",
- [CPU_R10000] "R10000",
- [CPU_R12000] "R12000",
- [CPU_R4300] "R4300",
- [CPU_R4650] "R4650",
- [CPU_R4700] "R4700",
- [CPU_R5000] "R5000",
- [CPU_R5000A] "R5000A",
- [CPU_R4640] "R4640",
- [CPU_NEVADA] "Nevada",
- [CPU_RM7000] "RM7000",
- [CPU_RM9000] "RM9000",
- [CPU_R5432] "R5432",
- [CPU_4KC] "MIPS 4Kc",
- [CPU_5KC] "MIPS 5Kc",
- [CPU_R4310] "R4310",
- [CPU_SB1] "SiByte SB1",
- [CPU_TX3912] "TX3912",
- [CPU_TX3922] "TX3922",
- [CPU_TX3927] "TX3927",
- [CPU_AU1000] "Au1000",
- [CPU_AU1500] "Au1500",
- [CPU_4KEC] "MIPS 4KEc",
- [CPU_4KSC] "MIPS 4KSc",
- [CPU_VR41XX] "NEC Vr41xx",
- [CPU_R5500] "R5500",
- [CPU_TX49XX] "TX49xx",
- [CPU_20KC] "MIPS 20Kc",
- [CPU_24K] "MIPS 24K",
- [CPU_25KF] "MIPS 25Kf",
- [CPU_VR4111] "NEC VR4111",
- [CPU_VR4121] "NEC VR4121",
- [CPU_VR4122] "NEC VR4122",
- [CPU_VR4131] "NEC VR4131",
- [CPU_VR4133] "NEC VR4133",
- [CPU_VR4181] "NEC VR4181",
- [CPU_VR4181A] "NEC VR4181A",
- [CPU_SR71000] "Sandcraft SR71000"
+ [CPU_UNKNOWN] = "unknown",
+ [CPU_R2000] = "R2000",
+ [CPU_R3000] = "R3000",
+ [CPU_R3000A] = "R3000A",
+ [CPU_R3041] = "R3041",
+ [CPU_R3051] = "R3051",
+ [CPU_R3052] = "R3052",
+ [CPU_R3081] = "R3081",
+ [CPU_R3081E] = "R3081E",
+ [CPU_R4000PC] = "R4000PC",
+ [CPU_R4000SC] = "R4000SC",
+ [CPU_R4000MC] = "R4000MC",
+ [CPU_R4200] = "R4200",
+ [CPU_R4400PC] = "R4400PC",
+ [CPU_R4400SC] = "R4400SC",
+ [CPU_R4400MC] = "R4400MC",
+ [CPU_R4600] = "R4600",
+ [CPU_R6000] = "R6000",
+ [CPU_R6000A] = "R6000A",
+ [CPU_R8000] = "R8000",
+ [CPU_R10000] = "R10000",
+ [CPU_R12000] = "R12000",
+ [CPU_R4300] = "R4300",
+ [CPU_R4650] = "R4650",
+ [CPU_R4700] = "R4700",
+ [CPU_R5000] = "R5000",
+ [CPU_R5000A] = "R5000A",
+ [CPU_R4640] = "R4640",
+ [CPU_NEVADA] = "Nevada",
+ [CPU_RM7000] = "RM7000",
+ [CPU_RM9000] = "RM9000",
+ [CPU_R5432] = "R5432",
+ [CPU_4KC] = "MIPS 4Kc",
+ [CPU_5KC] = "MIPS 5Kc",
+ [CPU_R4310] = "R4310",
+ [CPU_SB1] = "SiByte SB1",
+ [CPU_SB1A] = "SiByte SB1A",
+ [CPU_TX3912] = "TX3912",
+ [CPU_TX3922] = "TX3922",
+ [CPU_TX3927] = "TX3927",
+ [CPU_AU1000] = "Au1000",
+ [CPU_AU1500] = "Au1500",
+ [CPU_AU1100] = "Au1100",
+ [CPU_AU1550] = "Au1550",
+ [CPU_AU1200] = "Au1200",
+ [CPU_4KEC] = "MIPS 4KEc",
+ [CPU_4KSC] = "MIPS 4KSc",
+ [CPU_VR41XX] = "NEC Vr41xx",
+ [CPU_R5500] = "R5500",
+ [CPU_TX49XX] = "TX49xx",
+ [CPU_20KC] = "MIPS 20Kc",
+ [CPU_24K] = "MIPS 24K",
+ [CPU_25KF] = "MIPS 25Kf",
+ [CPU_34K] = "MIPS 34K",
+ [CPU_VR4111] = "NEC VR4111",
+ [CPU_VR4121] = "NEC VR4121",
+ [CPU_VR4122] = "NEC VR4122",
+ [CPU_VR4131] = "NEC VR4131",
+ [CPU_VR4133] = "NEC VR4133",
+ [CPU_VR4181] = "NEC VR4181",
+ [CPU_VR4181A] = "NEC VR4181A",
+ [CPU_SR71000] = "Sandcraft SR71000",
+ [CPU_PR4450] = "Philips PR4450",
};
@@ -105,8 +112,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
(version >> 4) & 0x0f, version & 0x0f,
(fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
seq_printf(m, "BogoMIPS\t\t: %lu.%02lu\n",
- loops_per_jiffy / (500000/HZ),
- (loops_per_jiffy / (5000/HZ)) % 100);
+ cpu_data[n].udelay_val / (500000/HZ),
+ (cpu_data[n].udelay_val / (5000/HZ)) % 100);
seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
seq_printf(m, "microsecond timers\t: %s\n",
cpu_has_counter ? "yes" : "no");
@@ -115,6 +122,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
cpu_has_divec ? "yes" : "no");
seq_printf(m, "hardware watchpoint\t: %s\n",
cpu_has_watch ? "yes" : "no");
+ seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n",
+ cpu_has_mips16 ? " mips16" : "",
+ cpu_has_mdmx ? " mdmx" : "",
+ cpu_has_mips3d ? " mips3d" : "",
+ cpu_has_smartmips ? " smartmips" : "",
+ cpu_has_dsp ? " dsp" : "",
+ cpu_has_mipsmt ? " mt" : ""
+ );
sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
cpu_has_vce ? "%u" : "not available");
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index e4f2f8011387..4fe3d5715c41 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -25,8 +25,10 @@
#include <linux/init.h>
#include <linux/completion.h>
+#include <asm/abi.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
+#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -39,14 +41,6 @@
#include <asm/inst.h>
/*
- * We use this if we don't have any better idle routine..
- * (This to kill: kernel/platform.c.
- */
-void default_idle (void)
-{
-}
-
-/*
* The idle thread. There's no useful work to be done, so just try to conserve
* power and have a low exit latency (ie sit in a loop waiting for somebody to
* say that they'd like to reschedule)
@@ -62,6 +56,54 @@ ATTRIB_NORET void cpu_idle(void)
}
}
+extern int do_signal(sigset_t *oldset, struct pt_regs *regs);
+extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
+
+/*
+ * Native o32 and N64 ABI without DSP ASE
+ */
+extern int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
+ int signr, sigset_t *set);
+extern int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
+ int signr, sigset_t *set, siginfo_t *info);
+
+struct mips_abi mips_abi = {
+ .do_signal = do_signal,
+#ifdef CONFIG_TRAD_SIGNALS
+ .setup_frame = setup_frame,
+#endif
+ .setup_rt_frame = setup_rt_frame
+};
+
+#ifdef CONFIG_MIPS32_O32
+/*
+ * o32 compatibility on 64-bit kernels, without DSP ASE
+ */
+extern int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
+ int signr, sigset_t *set);
+extern int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
+ int signr, sigset_t *set, siginfo_t *info);
+
+struct mips_abi mips_abi_32 = {
+ .do_signal = do_signal32,
+ .setup_frame = setup_frame_32,
+ .setup_rt_frame = setup_rt_frame_32
+};
+#endif /* CONFIG_MIPS32_O32 */
+
+#ifdef CONFIG_MIPS32_N32
+/*
+ * N32 on 64-bit kernels, without DSP ASE
+ */
+extern int setup_rt_frame_n32(struct k_sigaction * ka, struct pt_regs *regs,
+ int signr, sigset_t *set, siginfo_t *info);
+
+struct mips_abi mips_abi_n32 = {
+ .do_signal = do_signal,
+ .setup_rt_frame = setup_rt_frame_n32
+};
+#endif /* CONFIG_MIPS32_N32 */
+
asmlinkage void ret_from_fork(void);
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
@@ -78,6 +120,8 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
regs->cp0_status = status;
clear_used_math();
lose_fpu();
+ if (cpu_has_dsp)
+ __init_dsp();
regs->cp0_epc = pc;
regs->regs[29] = sp;
current_thread_info()->addr_limit = USER_DS;
@@ -97,14 +141,17 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
struct thread_info *ti = p->thread_info;
struct pt_regs *childregs;
long childksp;
+ p->set_child_tid = p->clear_child_tid = NULL;
childksp = (unsigned long)ti + THREAD_SIZE - 32;
preempt_disable();
- if (is_fpu_owner()) {
+ if (is_fpu_owner())
save_fp(p);
- }
+
+ if (cpu_has_dsp)
+ save_dsp(p);
preempt_enable();
@@ -142,6 +189,9 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
clear_tsk_thread_flag(p, TIF_USEDFPU);
+ if (clone_flags & CLONE_SETTLS)
+ ti->tp_value = regs->regs[7];
+
return 0;
}
@@ -175,6 +225,14 @@ void dump_regs(elf_greg_t *gp, struct pt_regs *regs)
#endif
}
+int dump_task_regs (struct task_struct *tsk, elf_gregset_t *regs)
+{
+ struct thread_info *ti = tsk->thread_info;
+ long ksp = (unsigned long)ti + THREAD_SIZE - 32;
+ dump_regs(&(*regs)[0], (struct pt_regs *) ksp - 1);
+ return 1;
+}
+
int dump_task_fpu (struct task_struct *t, elf_fpregset_t *fpr)
{
memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));
@@ -211,22 +269,48 @@ long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
-struct mips_frame_info {
+static struct mips_frame_info {
+ void *func;
+	int omit_fp;	/* compiled without -fno-omit-frame-pointer */
int frame_offset;
int pc_offset;
+} schedule_frame, mfinfo[] = {
+ { schedule, 0 }, /* must be first */
+ /* arch/mips/kernel/semaphore.c */
+ { __down, 1 },
+ { __down_interruptible, 1 },
+ /* kernel/sched.c */
+#ifdef CONFIG_PREEMPT
+ { preempt_schedule, 0 },
+#endif
+ { wait_for_completion, 0 },
+ { interruptible_sleep_on, 0 },
+ { interruptible_sleep_on_timeout, 0 },
+ { sleep_on, 0 },
+ { sleep_on_timeout, 0 },
+ { yield, 0 },
+ { io_schedule, 0 },
+ { io_schedule_timeout, 0 },
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
+ { __preempt_spin_lock, 0 },
+ { __preempt_write_lock, 0 },
+#endif
+ /* kernel/timer.c */
+ { schedule_timeout, 1 },
+/* { nanosleep_restart, 1 }, */
+ /* lib/rwsem-spinlock.c */
+ { __down_read, 1 },
+ { __down_write, 1 },
};
-static struct mips_frame_info schedule_frame;
-static struct mips_frame_info schedule_timeout_frame;
-static struct mips_frame_info sleep_on_frame;
-static struct mips_frame_info sleep_on_timeout_frame;
-static struct mips_frame_info wait_for_completion_frame;
+
static int mips_frame_info_initialized;
-static int __init get_frame_info(struct mips_frame_info *info, void *func)
+static int __init get_frame_info(struct mips_frame_info *info)
{
int i;
+ void *func = info->func;
union mips_instruction *ip = (union mips_instruction *)func;
info->pc_offset = -1;
- info->frame_offset = -1;
+ info->frame_offset = info->omit_fp ? 0 : -1;
for (i = 0; i < 128; i++, ip++) {
/* if jal, jalr, jr, stop. */
if (ip->j_format.opcode == jal_op ||
@@ -247,14 +331,16 @@ static int __init get_frame_info(struct mips_frame_info *info, void *func)
/* sw / sd $ra, offset($sp) */
if (ip->i_format.rt == 31) {
if (info->pc_offset != -1)
- break;
+ continue;
info->pc_offset =
ip->i_format.simmediate / sizeof(long);
}
/* sw / sd $s8, offset($sp) */
if (ip->i_format.rt == 30) {
+//#if 0 /* gcc 3.4 does aggressive optimization... */
if (info->frame_offset != -1)
- break;
+ continue;
+//#endif
info->frame_offset =
ip->i_format.simmediate / sizeof(long);
}
@@ -272,13 +358,25 @@ static int __init get_frame_info(struct mips_frame_info *info, void *func)
static int __init frame_info_init(void)
{
- mips_frame_info_initialized =
- !get_frame_info(&schedule_frame, schedule) &&
- !get_frame_info(&schedule_timeout_frame, schedule_timeout) &&
- !get_frame_info(&sleep_on_frame, sleep_on) &&
- !get_frame_info(&sleep_on_timeout_frame, sleep_on_timeout) &&
- !get_frame_info(&wait_for_completion_frame, wait_for_completion);
-
+ int i, found;
+ for (i = 0; i < ARRAY_SIZE(mfinfo); i++)
+ if (get_frame_info(&mfinfo[i]))
+ return -1;
+ schedule_frame = mfinfo[0];
+ /* bubble sort */
+ do {
+ struct mips_frame_info tmp;
+ found = 0;
+ for (i = 1; i < ARRAY_SIZE(mfinfo); i++) {
+ if (mfinfo[i-1].func > mfinfo[i].func) {
+ tmp = mfinfo[i];
+ mfinfo[i] = mfinfo[i-1];
+ mfinfo[i-1] = tmp;
+ found = 1;
+ }
+ }
+ } while (found);
+ mips_frame_info_initialized = 1;
return 0;
}
@@ -303,60 +401,39 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
/* get_wchan - a maintenance nightmare^W^Wpain in the ass ... */
unsigned long get_wchan(struct task_struct *p)
{
+ unsigned long stack_page;
unsigned long frame, pc;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
- if (!mips_frame_info_initialized)
+ stack_page = (unsigned long)p->thread_info;
+ if (!stack_page || !mips_frame_info_initialized)
return 0;
+
pc = thread_saved_pc(p);
if (!in_sched_functions(pc))
- goto out;
-
- if (pc >= (unsigned long) sleep_on_timeout)
- goto schedule_timeout_caller;
- if (pc >= (unsigned long) sleep_on)
- goto schedule_caller;
- if (pc >= (unsigned long) interruptible_sleep_on_timeout)
- goto schedule_timeout_caller;
- if (pc >= (unsigned long)interruptible_sleep_on)
- goto schedule_caller;
- if (pc >= (unsigned long)wait_for_completion)
- goto schedule_caller;
- goto schedule_timeout_caller;
-
-schedule_caller:
- frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];
- if (pc >= (unsigned long) sleep_on)
- pc = ((unsigned long *)frame)[sleep_on_frame.pc_offset];
- else
- pc = ((unsigned long *)frame)[wait_for_completion_frame.pc_offset];
- goto out;
+ return pc;
-schedule_timeout_caller:
- /*
- * The schedule_timeout frame
- */
frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];
+ do {
+ int i;
- /*
- * frame now points to sleep_on_timeout's frame
- */
- pc = ((unsigned long *)frame)[schedule_timeout_frame.pc_offset];
-
- if (in_sched_functions(pc)) {
- /* schedule_timeout called by [interruptible_]sleep_on_timeout */
- frame = ((unsigned long *)frame)[schedule_timeout_frame.frame_offset];
- pc = ((unsigned long *)frame)[sleep_on_timeout_frame.pc_offset];
- }
+ if (frame < stack_page || frame > stack_page + THREAD_SIZE - 32)
+ return 0;
-out:
+ for (i = ARRAY_SIZE(mfinfo) - 1; i >= 0; i--) {
+ if (pc >= (unsigned long) mfinfo[i].func)
+ break;
+ }
+ if (i < 0)
+ break;
-#ifdef CONFIG_64BIT
- if (current->thread.mflags & MF_32BIT_REGS) /* Kludge for 32-bit ps */
- pc &= 0xffffffffUL;
-#endif
+ if (mfinfo[i].omit_fp)
+ break;
+ pc = ((unsigned long *)frame)[mfinfo[i].pc_offset];
+ frame = ((unsigned long *)frame)[mfinfo[i].frame_offset];
+ } while (in_sched_functions(pc));
return pc;
}
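
The rewritten get_wchan() above no longer special-cases each sleep helper:
get_frame_info() scans every function in mfinfo[] for the "sw $ra, off($sp)" and
"sw $s8, off($sp)" stores in its prologue, and the unwind loop then walks saved
frames until the PC leaves the scheduler functions. A toy model of that loop (the
frame layouts and address ranges here are invented purely for illustration):

#include <stdio.h>

struct frame_info {
	unsigned long lo, hi;	/* pretend address range of a sched function */
	int pc_offset;		/* stack slot holding the saved return address */
	int frame_offset;	/* stack slot holding the caller's frame */
};

static const struct frame_info sched_funcs[] = {
	{ 0x1000, 0x10ff, 7, 6 },	/* stands in for schedule() */
	{ 0x2000, 0x20ff, 5, 4 },	/* stands in for sleep_on() */
};

static const struct frame_info *find_sched_function(unsigned long pc)
{
	for (unsigned int i = 0; i < sizeof(sched_funcs) / sizeof(sched_funcs[0]); i++)
		if (pc >= sched_funcs[i].lo && pc <= sched_funcs[i].hi)
			return &sched_funcs[i];
	return NULL;				/* pc is outside the scheduler */
}

int main(void)
{
	/* Fake stack: sleep_on() was called from "user code" at 0x9000,
	 * and sleep_on() then called schedule(). */
	unsigned long sleep_on_frame[8] = { [5] = 0x9000, [4] = 0 };
	unsigned long schedule_frame[8] = { [7] = 0x2010,
					    [6] = (unsigned long)sleep_on_frame };

	unsigned long pc = 0x1020;		/* blocked inside "schedule" */
	unsigned long frame = (unsigned long)schedule_frame;
	const struct frame_info *fi;

	while ((fi = find_sched_function(pc)) != NULL) {
		pc    = ((unsigned long *)frame)[fi->pc_offset];
		frame = ((unsigned long *)frame)[fi->frame_offset];
	}
	printf("wchan-style answer: %#lx\n", pc);	/* prints 0x9000 */
	return 0;
}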
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 0b571a5b4b83..f1b0f3e1f95b 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -28,14 +28,18 @@
#include <linux/security.h>
#include <linux/signal.h>
+#include <asm/byteorder.h>
#include <asm/cpu.h>
+#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
+#include <asm/reg.h>
/*
* Called by kernel/ptrace.c when detaching..
@@ -47,7 +51,130 @@ void ptrace_disable(struct task_struct *child)
/* Nothing to do.. */
}
-asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
+/*
+ * Read a general register set. We always use the 64-bit format, even
+ * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
+ * Registers are sign extended to fill the available space.
+ */
+int ptrace_getregs (struct task_struct *child, __s64 __user *data)
+{
+ struct pt_regs *regs;
+ int i;
+
+ if (!access_ok(VERIFY_WRITE, data, 38 * 8))
+ return -EIO;
+
+ regs = (struct pt_regs *) ((unsigned long) child->thread_info +
+ THREAD_SIZE - 32 - sizeof(struct pt_regs));
+
+ for (i = 0; i < 32; i++)
+ __put_user (regs->regs[i], data + i);
+ __put_user (regs->lo, data + EF_LO - EF_R0);
+ __put_user (regs->hi, data + EF_HI - EF_R0);
+ __put_user (regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
+ __put_user (regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
+ __put_user (regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
+ __put_user (regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);
+
+ return 0;
+}
+
+/*
+ * Write a general register set. As for PTRACE_GETREGS, we always use
+ * the 64-bit format. On a 32-bit kernel only the lower order half
+ * (according to endianness) will be used.
+ */
+int ptrace_setregs (struct task_struct *child, __s64 __user *data)
+{
+ struct pt_regs *regs;
+ int i;
+
+ if (!access_ok(VERIFY_READ, data, 38 * 8))
+ return -EIO;
+
+ regs = (struct pt_regs *) ((unsigned long) child->thread_info +
+ THREAD_SIZE - 32 - sizeof(struct pt_regs));
+
+ for (i = 0; i < 32; i++)
+ __get_user (regs->regs[i], data + i);
+ __get_user (regs->lo, data + EF_LO - EF_R0);
+ __get_user (regs->hi, data + EF_HI - EF_R0);
+ __get_user (regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
+
+ /* badvaddr, status, and cause may not be written. */
+
+ return 0;
+}
+
+int ptrace_getfpregs (struct task_struct *child, __u32 __user *data)
+{
+ int i;
+
+ if (!access_ok(VERIFY_WRITE, data, 33 * 8))
+ return -EIO;
+
+ if (tsk_used_math(child)) {
+ fpureg_t *fregs = get_fpu_regs(child);
+ for (i = 0; i < 32; i++)
+ __put_user (fregs[i], i + (__u64 __user *) data);
+ } else {
+ for (i = 0; i < 32; i++)
+ __put_user ((__u64) -1, i + (__u64 __user *) data);
+ }
+
+ if (cpu_has_fpu) {
+ unsigned int flags, tmp;
+
+ __put_user (child->thread.fpu.hard.fcr31, data + 64);
+
+ preempt_disable();
+ if (cpu_has_mipsmt) {
+ unsigned int vpflags = dvpe();
+ flags = read_c0_status();
+ __enable_fpu();
+ __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
+ write_c0_status(flags);
+ evpe(vpflags);
+ } else {
+ flags = read_c0_status();
+ __enable_fpu();
+ __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
+ write_c0_status(flags);
+ }
+ preempt_enable();
+ __put_user (tmp, data + 65);
+ } else {
+ __put_user (child->thread.fpu.soft.fcr31, data + 64);
+ __put_user ((__u32) 0, data + 65);
+ }
+
+ return 0;
+}
+
+int ptrace_setfpregs (struct task_struct *child, __u32 __user *data)
+{
+ fpureg_t *fregs;
+ int i;
+
+ if (!access_ok(VERIFY_READ, data, 33 * 8))
+ return -EIO;
+
+ fregs = get_fpu_regs(child);
+
+ for (i = 0; i < 32; i++)
+ __get_user (fregs[i], i + (__u64 __user *) data);
+
+ if (cpu_has_fpu)
+ __get_user (child->thread.fpu.hard.fcr31, data + 64);
+ else
+ __get_user (child->thread.fpu.soft.fcr31, data + 64);
+
+ /* FIR may not be written. */
+
+ return 0;
+}
+
+asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
struct task_struct *child;
int ret;
@@ -103,7 +230,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
ret = -EIO;
if (copied != sizeof(tmp))
break;
- ret = put_user(tmp,(unsigned long *) data);
+ ret = put_user(tmp,(unsigned long __user *) data);
break;
}
@@ -169,18 +296,53 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
if (!cpu_has_fpu)
break;
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
- write_c0_status(flags);
+ preempt_disable();
+ if (cpu_has_mipsmt) {
+ unsigned int vpflags = dvpe();
+ flags = read_c0_status();
+ __enable_fpu();
+ __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
+ write_c0_status(flags);
+ evpe(vpflags);
+ } else {
+ flags = read_c0_status();
+ __enable_fpu();
+ __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
+ write_c0_status(flags);
+ }
+ preempt_enable();
+ break;
+ }
+ case DSP_BASE ... DSP_BASE + 5: {
+ dspreg_t *dregs;
+
+ if (!cpu_has_dsp) {
+ tmp = 0;
+ ret = -EIO;
+ goto out_tsk;
+ }
+ if (child->thread.dsp.used_dsp) {
+ dregs = __get_dsp_regs(child);
+ tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+ } else {
+			tmp = -1;	/* DSP registers not yet used */
+ }
break;
}
+ case DSP_CONTROL:
+ if (!cpu_has_dsp) {
+ tmp = 0;
+ ret = -EIO;
+ goto out_tsk;
+ }
+ tmp = child->thread.dsp.dspcontrol;
+ break;
default:
tmp = 0;
ret = -EIO;
goto out_tsk;
}
- ret = put_user(tmp, (unsigned long *) data);
+ ret = put_user(tmp, (unsigned long __user *) data);
break;
}
@@ -247,6 +409,25 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
else
child->thread.fpu.soft.fcr31 = data;
break;
+ case DSP_BASE ... DSP_BASE + 5: {
+ dspreg_t *dregs;
+
+ if (!cpu_has_dsp) {
+ ret = -EIO;
+ break;
+ }
+
+ dregs = __get_dsp_regs(child);
+ dregs[addr - DSP_BASE] = data;
+ break;
+ }
+ case DSP_CONTROL:
+ if (!cpu_has_dsp) {
+ ret = -EIO;
+ break;
+ }
+ child->thread.dsp.dspcontrol = data;
+ break;
default:
/* The rest are not allowed. */
ret = -EIO;
@@ -255,6 +436,22 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
break;
}
+ case PTRACE_GETREGS:
+ ret = ptrace_getregs (child, (__u64 __user *) data);
+ break;
+
+ case PTRACE_SETREGS:
+ ret = ptrace_setregs (child, (__u64 __user *) data);
+ break;
+
+ case PTRACE_GETFPREGS:
+ ret = ptrace_getfpregs (child, (__u32 __user *) data);
+ break;
+
+ case PTRACE_SETFPREGS:
+ ret = ptrace_setfpregs (child, (__u32 __user *) data);
+ break;
+
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
case PTRACE_CONT: { /* restart after signal. */
ret = -EIO;
@@ -289,6 +486,11 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
ret = ptrace_detach(child, data);
break;
+ case PTRACE_GET_THREAD_AREA:
+ ret = put_user(child->thread_info->tp_value,
+ (unsigned long __user *) data);
+ break;
+
default:
ret = ptrace_request(child, request, addr, data);
break;
@@ -303,21 +505,14 @@ out:
static inline int audit_arch(void)
{
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
-#ifdef CONFIG_64BIT
- if (!(current->thread.mflags & MF_32BIT_REGS))
- return AUDIT_ARCH_MIPSEL64;
-#endif /* MIPS64 */
- return AUDIT_ARCH_MIPSEL;
-
-#else /* big endian... */
+ int arch = EM_MIPS;
#ifdef CONFIG_64BIT
- if (!(current->thread.mflags & MF_32BIT_REGS))
- return AUDIT_ARCH_MIPS64;
-#endif /* MIPS64 */
- return AUDIT_ARCH_MIPS;
-
-#endif /* endian */
+ arch |= __AUDIT_ARCH_64BIT;
+#endif
+#if defined(__LITTLE_ENDIAN)
+ arch |= __AUDIT_ARCH_LE;
+#endif
+ return arch;
}
/*
@@ -327,12 +522,13 @@ static inline int audit_arch(void)
asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
{
if (unlikely(current->audit_context) && entryexit)
- audit_syscall_exit(current, AUDITSC_RESULT(regs->regs[2]), regs->regs[2]);
+ audit_syscall_exit(current, AUDITSC_RESULT(regs->regs[2]),
+ regs->regs[2]);
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- goto out;
if (!(current->ptrace & PT_PTRACED))
goto out;
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ goto out;
/* The 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
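
ptrace_getregs() above always fills 38 sign-extended 64-bit slots (the 32 GPRs
followed by lo, hi, cp0_epc, cp0_badvaddr, cp0_status and cp0_cause), whatever the
tracee's ABI. A hypothetical tracer using the new request might look like this;
error handling is omitted and the slot indices follow the EF_* layout assumed above:

#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {				/* child: ask to be traced, then stop */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}

	waitpid(pid, NULL, 0);			/* wait for the SIGSTOP */

	int64_t regs[38];			/* $0..$31, lo, hi, epc, badvaddr, status, cause */
	if (ptrace(PTRACE_GETREGS, pid, NULL, regs) == 0)
		printf("sp=%#llx epc=%#llx\n",
		       (unsigned long long)regs[29],	/* $29 is the stack pointer */
		       (unsigned long long)regs[34]);	/* epc, per the layout above */

	ptrace(PTRACE_CONT, pid, NULL, NULL);
	waitpid(pid, NULL, 0);
	return 0;
}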
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index eee207969c21..9a9b04972132 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -24,17 +24,24 @@
#include <linux/smp_lock.h>
#include <linux/user.h>
#include <linux/security.h>
-#include <linux/signal.h>
#include <asm/cpu.h>
+#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
+int ptrace_getregs (struct task_struct *child, __s64 __user *data);
+int ptrace_setregs (struct task_struct *child, __s64 __user *data);
+
+int ptrace_getfpregs (struct task_struct *child, __u32 __user *data);
+int ptrace_setfpregs (struct task_struct *child, __u32 __user *data);
+
/*
* Tracing a 32-bit process with a 64-bit strace and vice versa will not
* work. I don't know how to fix this.
@@ -99,6 +106,35 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
break;
}
+ /*
+ * Read 4 bytes of the other process' storage
+ * data is a pointer specifying where the user wants the
+ * 4 bytes copied into
+ * addr is a pointer in the user's storage that contains an 8 byte
+	 * address in the other process of the 4 bytes that are to be read
+ * (this is run in a 32-bit process looking at a 64-bit process)
+ * when I and D space are separate, these will need to be fixed.
+ */
+ case PTRACE_PEEKTEXT_3264:
+ case PTRACE_PEEKDATA_3264: {
+ u32 tmp;
+ int copied;
+ u32 __user * addrOthers;
+
+ ret = -EIO;
+
+ /* Get the addr in the other process that we want to read */
+ if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0)
+ break;
+
+ copied = access_process_vm(child, (u64)addrOthers, &tmp,
+ sizeof(tmp), 0);
+ if (copied != sizeof(tmp))
+ break;
+ ret = put_user(tmp, (u32 __user *) (unsigned long) data);
+ break;
+ }
+
/* Read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
struct pt_regs *regs;
@@ -156,12 +192,44 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
if (!cpu_has_fpu)
break;
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
- write_c0_status(flags);
+ preempt_disable();
+ if (cpu_has_mipsmt) {
+ unsigned int vpflags = dvpe();
+ flags = read_c0_status();
+ __enable_fpu();
+ __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
+ write_c0_status(flags);
+ evpe(vpflags);
+ } else {
+ flags = read_c0_status();
+ __enable_fpu();
+ __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
+ write_c0_status(flags);
+ }
+ preempt_enable();
break;
}
+ case DSP_BASE ... DSP_BASE + 5:
+ if (!cpu_has_dsp) {
+ tmp = 0;
+ ret = -EIO;
+ goto out_tsk;
+ }
+ if (child->thread.dsp.used_dsp) {
+ dspreg_t *dregs = __get_dsp_regs(child);
+ tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+ } else {
+			tmp = -1;	/* DSP registers not yet used */
+ }
+ break;
+ case DSP_CONTROL:
+ if (!cpu_has_dsp) {
+ tmp = 0;
+ ret = -EIO;
+ goto out_tsk;
+ }
+ tmp = child->thread.dsp.dspcontrol;
+ break;
default:
tmp = 0;
ret = -EIO;
@@ -181,6 +249,31 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
ret = -EIO;
break;
+ /*
+ * Write 4 bytes into the other process' storage
+ * data is the 4 bytes that the user wants written
+ * addr is a pointer in the user's storage that contains an
+ * 8 byte address in the other process where the 4 bytes
+	 * are to be written
+ * (this is run in a 32-bit process looking at a 64-bit process)
+ * when I and D space are separate, these will need to be fixed.
+ */
+ case PTRACE_POKETEXT_3264:
+ case PTRACE_POKEDATA_3264: {
+ u32 __user * addrOthers;
+
+ /* Get the addr in the other process that we want to write into */
+ ret = -EIO;
+ if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0)
+ break;
+ ret = 0;
+ if (access_process_vm(child, (u64)addrOthers, &data,
+ sizeof(data), 1) == sizeof(data))
+ break;
+ ret = -EIO;
+ break;
+ }
+
case PTRACE_POKEUSR: {
struct pt_regs *regs;
ret = 0;
@@ -231,6 +324,22 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
else
child->thread.fpu.soft.fcr31 = data;
break;
+	case DSP_BASE ... DSP_BASE + 5: {
+		dspreg_t *dregs;
+
+		if (!cpu_has_dsp) {
+			ret = -EIO;
+			break;
+		}
+		dregs = __get_dsp_regs(child);
+		dregs[addr - DSP_BASE] = data;
+		break;
+	}
+ case DSP_CONTROL:
+ if (!cpu_has_dsp) {
+ ret = -EIO;
+ break;
+ }
+ child->thread.dsp.dspcontrol = data;
+ break;
default:
/* The rest are not allowed. */
ret = -EIO;
@@ -239,6 +348,22 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
break;
}
+ case PTRACE_GETREGS:
+ ret = ptrace_getregs (child, (__u64 __user *) (__u64) data);
+ break;
+
+ case PTRACE_SETREGS:
+ ret = ptrace_setregs (child, (__u64 __user *) (__u64) data);
+ break;
+
+ case PTRACE_GETFPREGS:
+ ret = ptrace_getfpregs (child, (__u32 __user *) (__u64) data);
+ break;
+
+ case PTRACE_SETFPREGS:
+ ret = ptrace_setfpregs (child, (__u32 __user *) (__u64) data);
+ break;
+
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
case PTRACE_CONT: { /* restart after signal. */
ret = -EIO;
@@ -269,10 +394,25 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
wake_up_process(child);
break;
+ case PTRACE_GET_THREAD_AREA:
+ ret = put_user(child->thread_info->tp_value,
+ (unsigned int __user *) (unsigned long) data);
+ break;
+
case PTRACE_DETACH: /* detach a process that was attached. */
ret = ptrace_detach(child, data);
break;
+ case PTRACE_GETEVENTMSG:
+ ret = put_user(child->ptrace_message,
+ (unsigned int __user *) (unsigned long) data);
+ break;
+
+ case PTRACE_GET_THREAD_AREA_3264:
+ ret = put_user(child->thread_info->tp_value,
+ (unsigned long __user *) (unsigned long) data);
+ break;
+
default:
ret = ptrace_request(child, request, addr, data);
break;
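
The new PTRACE_PEEK*/POKE*_3264 requests add one level of indirection so a 32-bit
debugger can reach a 64-bit address space: addr points at an 8-byte tracee address
kept in the tracer's own memory, while data points at (or holds) the 4 bytes being
transferred. A hypothetical helper for the read side, assuming the request
constants are exported via <asm/ptrace.h>:

#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>		/* PTRACE_PEEKDATA_3264 (MIPS-specific) */

/* Read one 32-bit word from a 64-bit tracee at remote_addr. */
uint32_t peek_data_3264(pid_t pid, uint64_t remote_addr)
{
	uint64_t addr64 = remote_addr;	/* the 8-byte address lives in our memory */
	uint32_t word = 0;

	/* addr -> pointer to the 64-bit remote address,
	 * data -> where the kernel should copy the 4 bytes. */
	ptrace(PTRACE_PEEKDATA_3264, pid, &addr64, &word);
	return word;
}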
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 1a14c6b18829..283a98508fc8 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -32,7 +32,7 @@
.set noreorder
.set mips3
- /* Save floating point context */
+
LEAF(_save_fp_context)
cfc1 t1, fcr31
@@ -74,9 +74,6 @@ LEAF(_save_fp_context)
EX sdc1 $f28, SC_FPREGS+224(a0)
EX sdc1 $f30, SC_FPREGS+240(a0)
EX sw t1, SC_FPC_CSR(a0)
- cfc1 t0, $0 # implementation/version
- EX sw t0, SC_FPC_EIR(a0)
-
jr ra
li v0, 0 # success
END(_save_fp_context)
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
new file mode 100644
index 000000000000..8c81f3cb4e2d
--- /dev/null
+++ b/arch/mips/kernel/rtlx.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+#include <linux/elf.h>
+#include <linux/seq_file.h>
+#include <linux/syscalls.h>
+#include <linux/moduleloader.h>
+#include <linux/interrupt.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <asm/mipsmtregs.h>
+#include <asm/cacheflush.h>
+#include <asm/atomic.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/rtlx.h>
+
+#define RTLX_MAJOR 64
+#define RTLX_TARG_VPE 1
+
+struct rtlx_info *rtlx;
+static int major;
+static char module_name[] = "rtlx";
+static inline int spacefree(int read, int write, int size);
+
+static struct chan_waitqueues {
+ wait_queue_head_t rt_queue;
+ wait_queue_head_t lx_queue;
+} channel_wqs[RTLX_CHANNELS];
+
+static struct irqaction irq;
+static int irq_num;
+
+extern void *vpe_get_shared(int index);
+
+static void rtlx_dispatch(struct pt_regs *regs)
+{
+ do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ, regs);
+}
+
+irqreturn_t rtlx_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ irqreturn_t r = IRQ_HANDLED;
+ int i;
+
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ struct rtlx_channel *chan = &rtlx->channel[i];
+
+ if (chan->lx_read != chan->lx_write)
+ wake_up_interruptible(&channel_wqs[i].lx_queue);
+ }
+
+ return r;
+}
+
+void dump_rtlx(void)
+{
+ int i;
+
+ printk("id 0x%lx state %d\n", rtlx->id, rtlx->state);
+
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ struct rtlx_channel *chan = &rtlx->channel[i];
+
+ printk(" rt_state %d lx_state %d buffer_size %d\n",
+ chan->rt_state, chan->lx_state, chan->buffer_size);
+
+ printk(" rt_read %d rt_write %d\n",
+ chan->rt_read, chan->rt_write);
+
+ printk(" lx_read %d lx_write %d\n",
+ chan->lx_read, chan->lx_write);
+
+ printk(" rt_buffer <%s>\n", chan->rt_buffer);
+ printk(" lx_buffer <%s>\n", chan->lx_buffer);
+ }
+}
+
+/* call when we have the address of the shared structure from the SP side. */
+static int rtlx_init(struct rtlx_info *rtlxi)
+{
+ int i;
+
+ if (rtlxi->id != RTLX_ID) {
+ printk(KERN_WARNING "no valid RTLX id at 0x%p\n", rtlxi);
+ return (-ENOEXEC);
+ }
+
+ /* initialise the wait queues */
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ init_waitqueue_head(&channel_wqs[i].rt_queue);
+ init_waitqueue_head(&channel_wqs[i].lx_queue);
+ }
+
+ /* set up for interrupt handling */
+ memset(&irq, 0, sizeof(struct irqaction));
+
+ if (cpu_has_vint) {
+ set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
+ }
+
+ irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ;
+ irq.handler = rtlx_interrupt;
+ irq.flags = SA_INTERRUPT;
+ irq.name = "RTLX";
+ irq.dev_id = rtlx;
+ setup_irq(irq_num, &irq);
+
+ rtlx = rtlxi;
+ return (0);
+}
+
+/* only allow one open process at a time to open each channel */
+static int rtlx_open(struct inode *inode, struct file *filp)
+{
+ int minor, ret;
+ struct rtlx_channel *chan;
+
+ /* assume only 1 device at the mo. */
+ minor = MINOR(inode->i_rdev);
+
+ if (rtlx == NULL) {
+ struct rtlx_info **p;
+ if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
+ printk(" vpe_get_shared is NULL. Has an SP program been loaded?\n");
+ return (-EFAULT);
+ }
+
+ if (*p == NULL) {
+ printk(" vpe_shared %p %p\n", p, *p);
+ return (-EFAULT);
+ }
+
+ if ((ret = rtlx_init(*p)) < 0)
+ return (ret);
+ }
+
+ chan = &rtlx->channel[minor];
+
+ /* already open? */
+ if (chan->lx_state == RTLX_STATE_OPENED)
+ return (-EBUSY);
+
+ chan->lx_state = RTLX_STATE_OPENED;
+ return (0);
+}
+
+static int rtlx_release(struct inode *inode, struct file *filp)
+{
+ int minor;
+
+ minor = MINOR(inode->i_rdev);
+ rtlx->channel[minor].lx_state = RTLX_STATE_UNUSED;
+ return (0);
+}
+
+static unsigned int rtlx_poll(struct file *file, poll_table * wait)
+{
+ int minor;
+ unsigned int mask = 0;
+ struct rtlx_channel *chan;
+
+ minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ chan = &rtlx->channel[minor];
+
+ poll_wait(file, &channel_wqs[minor].rt_queue, wait);
+ poll_wait(file, &channel_wqs[minor].lx_queue, wait);
+
+ /* data available to read? */
+ if (chan->lx_read != chan->lx_write)
+ mask |= POLLIN | POLLRDNORM;
+
+ /* space to write */
+ if (spacefree(chan->rt_read, chan->rt_write, chan->buffer_size))
+ mask |= POLLOUT | POLLWRNORM;
+
+ return (mask);
+}
+
+static ssize_t rtlx_read(struct file *file, char __user * buffer, size_t count,
+ loff_t * ppos)
+{
+ size_t fl = 0L;
+ int minor;
+ struct rtlx_channel *lx;
+ DECLARE_WAITQUEUE(wait, current);
+
+ minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ lx = &rtlx->channel[minor];
+
+ /* data available? */
+ if (lx->lx_write == lx->lx_read) {
+ if (file->f_flags & O_NONBLOCK)
+ return (0); // -EAGAIN makes cat whinge
+
+ /* go to sleep */
+ add_wait_queue(&channel_wqs[minor].lx_queue, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ while (lx->lx_write == lx->lx_read)
+ schedule();
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&channel_wqs[minor].lx_queue, &wait);
+
+ /* back running */
+ }
+
+ /* find out how much in total */
+ count = min( count,
+ (size_t)(lx->lx_write + lx->buffer_size - lx->lx_read) % lx->buffer_size);
+
+ /* then how much from the read pointer onwards */
+ fl = min( count, (size_t)lx->buffer_size - lx->lx_read);
+
+ copy_to_user (buffer, &lx->lx_buffer[lx->lx_read], fl);
+
+ /* and if there is anything left at the beginning of the buffer */
+ if ( count - fl )
+ copy_to_user (buffer + fl, lx->lx_buffer, count - fl);
+
+ /* update the index */
+ lx->lx_read += count;
+ lx->lx_read %= lx->buffer_size;
+
+ return (count);
+}
+
+static inline int spacefree(int read, int write, int size)
+{
+ if (read == write) {
+		/* The buffer is never filled completely, so the indexes are equal
+		   only when it is empty, and differ whenever data is available. */
+ return (size - 1);
+ }
+
+ return ((read + size - write) % size) - 1;
+}
+
+static ssize_t rtlx_write(struct file *file, const char __user * buffer,
+ size_t count, loff_t * ppos)
+{
+ int minor;
+ struct rtlx_channel *rt;
+ size_t fl;
+ DECLARE_WAITQUEUE(wait, current);
+
+ minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ rt = &rtlx->channel[minor];
+
+ /* any space left... */
+ if (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size)) {
+
+ if (file->f_flags & O_NONBLOCK)
+ return (-EAGAIN);
+
+ add_wait_queue(&channel_wqs[minor].rt_queue, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ while (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size))
+ schedule();
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&channel_wqs[minor].rt_queue, &wait);
+ }
+
+ /* total number of bytes to copy */
+ count = min( count, (size_t)spacefree(rt->rt_read, rt->rt_write, rt->buffer_size) );
+
+ /* first bit from write pointer to the end of the buffer, or count */
+ fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
+
+ copy_from_user(&rt->rt_buffer[rt->rt_write], buffer, fl);
+
+ /* if there's any left copy to the beginning of the buffer */
+ if( count - fl )
+ copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
+
+ rt->rt_write += count;
+ rt->rt_write %= rt->buffer_size;
+
+ return(count);
+}
+
+static struct file_operations rtlx_fops = {
+ .owner = THIS_MODULE,
+ .open = rtlx_open,
+ .release = rtlx_release,
+ .write = rtlx_write,
+ .read = rtlx_read,
+ .poll = rtlx_poll
+};
+
+static int rtlx_module_init(void)
+{
+ if ((major = register_chrdev(RTLX_MAJOR, module_name, &rtlx_fops)) < 0) {
+ printk("rtlx_module_init: unable to register device\n");
+ return (-EBUSY);
+ }
+
+ if (major == 0)
+ major = RTLX_MAJOR;
+
+ return (0);
+}
+
+static void rtlx_module_exit(void)
+{
+ unregister_chrdev(major, module_name);
+}
+
+module_init(rtlx_module_init);
+module_exit(rtlx_module_exit);
+MODULE_DESCRIPTION("MIPS RTLX");
+MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc");
+MODULE_LICENSE("GPL");
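
The channel FIFOs above deliberately keep one slot unused, so read == write can
only ever mean "empty" and the indexes never need a separate byte count. A
stand-alone check of the spacefree() arithmetic with illustrative sizes:

#include <assert.h>
#include <stdio.h>

static int spacefree(int read, int write, int size)
{
	if (read == write)
		return size - 1;	/* empty: everything but the reserved slot */

	return ((read + size - write) % size) - 1;
}

int main(void)
{
	const int size = 8;
	int rd = 0, wr = 0;

	assert(spacefree(rd, wr, size) == 7);	/* empty buffer */

	wr = (wr + 5) % size;			/* producer wrote 5 bytes */
	assert(spacefree(rd, wr, size) == 2);

	rd = (rd + 5) % size;			/* consumer caught up: empty again */
	assert(spacefree(rd, wr, size) == 7);

	wr = (wr + 7) % size;			/* producer used all remaining space */
	assert(spacefree(rd, wr, size) == 0);	/* "full": write is one behind read */

	printf("ring buffer invariants hold\n");
	return 0;
}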
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 17b5030fb6ea..4dd8e8b4fbc2 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -578,7 +578,7 @@ einval: li v0, -EINVAL
sys sys_fremovexattr 2 /* 4235 */
sys sys_tkill 2
sys sys_sendfile64 5
- sys sys_futex 2
+ sys sys_futex 6
sys sys_sched_setaffinity 3
sys sys_sched_getaffinity 3 /* 4240 */
sys sys_io_setup 2
@@ -587,7 +587,7 @@ einval: li v0, -EINVAL
sys sys_io_submit 3
sys sys_io_cancel 3 /* 4245 */
sys sys_exit_group 1
- sys sys_lookup_dcookie 3
+ sys sys_lookup_dcookie 4
sys sys_epoll_create 1
sys sys_epoll_ctl 4
sys sys_epoll_wait 3 /* 4250 */
@@ -618,12 +618,15 @@ einval: li v0, -EINVAL
sys sys_mq_notify 2 /* 4275 */
sys sys_mq_getsetattr 3
sys sys_ni_syscall 0 /* sys_vserver */
- sys sys_waitid 4
+ sys sys_waitid 5
sys sys_ni_syscall 0 /* available, was setaltroot */
- sys sys_add_key 5
+ sys sys_add_key 5 /* 4280 */
sys sys_request_key 4
sys sys_keyctl 5
-
+ sys sys_set_thread_area 1
+ sys sys_inotify_init 0
+ sys sys_inotify_add_watch 3 /* 4285 */
+ sys sys_inotify_rm_watch 2
.endm
/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index ffb22a2068bf..9085838d6ce3 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -449,3 +449,7 @@ sys_call_table:
PTR sys_add_key
PTR sys_request_key /* 5240 */
PTR sys_keyctl
+ PTR sys_set_thread_area
+ PTR sys_inotify_init
+ PTR sys_inotify_add_watch
+ PTR sys_inotify_rm_watch /* 5245 */
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index e52049c87bc3..7e66eb823bf6 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -176,7 +176,7 @@ EXPORT(sysn32_call_table)
PTR sys_fork
PTR sys32_execve
PTR sys_exit
- PTR sys32_wait4
+ PTR compat_sys_wait4
PTR sys_kill /* 6060 */
PTR sys32_newuname
PTR sys_semget
@@ -216,7 +216,7 @@ EXPORT(sysn32_call_table)
PTR compat_sys_getrusage
PTR sys32_sysinfo
PTR compat_sys_times
- PTR sys_ptrace
+ PTR sys32_ptrace
PTR sys_getuid /* 6100 */
PTR sys_syslog
PTR sys_getgid
@@ -243,14 +243,14 @@ EXPORT(sysn32_call_table)
PTR sys_capget
PTR sys_capset
PTR sys32_rt_sigpending /* 6125 */
- PTR compat_sys_rt_sigtimedwait
- PTR sys32_rt_sigqueueinfo
+ PTR sysn32_rt_sigtimedwait
+ PTR sys_rt_sigqueueinfo
PTR sys32_rt_sigsuspend
PTR sys32_sigaltstack
PTR compat_sys_utime /* 6130 */
PTR sys_mknod
PTR sys32_personality
- PTR sys_ustat
+ PTR sys32_ustat
PTR compat_sys_statfs
PTR compat_sys_fstatfs /* 6135 */
PTR sys_sysfs
@@ -329,7 +329,7 @@ EXPORT(sysn32_call_table)
PTR sys_epoll_wait
PTR sys_remap_file_pages /* 6210 */
PTR sysn32_rt_sigreturn
- PTR sys_fcntl
+ PTR compat_sys_fcntl64
PTR sys_set_tid_address
PTR sys_restart_syscall
PTR sys_semtimedop /* 6215 */
@@ -337,15 +337,15 @@ EXPORT(sysn32_call_table)
PTR compat_sys_statfs64
PTR compat_sys_fstatfs64
PTR sys_sendfile64
- PTR sys_timer_create /* 6220 */
- PTR sys_timer_settime
- PTR sys_timer_gettime
+ PTR sys32_timer_create /* 6220 */
+ PTR compat_sys_timer_settime
+ PTR compat_sys_timer_gettime
PTR sys_timer_getoverrun
PTR sys_timer_delete
- PTR sys_clock_settime /* 6225 */
- PTR sys_clock_gettime
- PTR sys_clock_getres
- PTR sys_clock_nanosleep
+ PTR compat_sys_clock_settime /* 6225 */
+ PTR compat_sys_clock_gettime
+ PTR compat_sys_clock_getres
+ PTR compat_sys_clock_nanosleep
PTR sys_tgkill
PTR compat_sys_utimes /* 6230 */
PTR sys_ni_syscall /* sys_mbind */
@@ -358,8 +358,12 @@ EXPORT(sysn32_call_table)
PTR compat_sys_mq_notify
PTR compat_sys_mq_getsetattr
PTR sys_ni_syscall /* 6240, sys_vserver */
- PTR sys_waitid
+ PTR sysn32_waitid
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key
PTR sys_request_key
PTR sys_keyctl /* 6245 */
+ PTR sys_set_thread_area
+ PTR sys_inotify_init
+ PTR sys_inotify_add_watch
+ PTR sys_inotify_rm_watch
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 739f3998d76b..5a16401e443a 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -316,13 +316,13 @@ sys_call_table:
PTR sys_vhangup
PTR sys_ni_syscall /* was sys_idle */
PTR sys_ni_syscall /* sys_vm86 */
- PTR sys32_wait4
+ PTR compat_sys_wait4
PTR sys_swapoff /* 4115 */
PTR sys32_sysinfo
PTR sys32_ipc
PTR sys_fsync
PTR sys32_sigreturn
- PTR sys_clone /* 4120 */
+ PTR sys32_clone /* 4120 */
PTR sys_setdomainname
PTR sys32_newuname
PTR sys_ni_syscall /* sys_modify_ldt */
@@ -391,7 +391,7 @@ sys_call_table:
PTR sys_getresuid
PTR sys_ni_syscall /* was query_module */
PTR sys_poll
- PTR sys_nfsservctl
+ PTR compat_sys_nfsservctl
PTR sys_setresgid /* 4190 */
PTR sys_getresgid
PTR sys_prctl
@@ -459,7 +459,7 @@ sys_call_table:
PTR sys_fadvise64_64
PTR compat_sys_statfs64 /* 4255 */
PTR compat_sys_fstatfs64
- PTR sys_timer_create
+ PTR sys32_timer_create
PTR compat_sys_timer_settime
PTR compat_sys_timer_gettime
PTR sys_timer_getoverrun /* 4260 */
@@ -480,9 +480,13 @@ sys_call_table:
PTR compat_sys_mq_notify /* 4275 */
PTR compat_sys_mq_getsetattr
PTR sys_ni_syscall /* sys_vserver */
- PTR sys_waitid
+ PTR sys32_waitid
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key /* 4280 */
PTR sys_request_key
PTR sys_keyctl
+ PTR sys_set_thread_area
+ PTR sys_inotify_init
+ PTR sys_inotify_add_watch /* 4285 */
+ PTR sys_inotify_rm_watch
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/semaphore.c b/arch/mips/kernel/semaphore.c
index 9c40fe5a8e8d..1265358cdca1 100644
--- a/arch/mips/kernel/semaphore.c
+++ b/arch/mips/kernel/semaphore.c
@@ -42,24 +42,28 @@ static inline int __sem_update_count(struct semaphore *sem, int incr)
if (cpu_has_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__(
- "1: ll %0, %2 \n"
+ " .set mips3 \n"
+ "1: ll %0, %2 # __sem_update_count \n"
" sra %1, %0, 31 \n"
" not %1 \n"
" and %1, %0, %1 \n"
- " add %1, %1, %3 \n"
+ " addu %1, %1, %3 \n"
" sc %1, %2 \n"
" beqzl %1, 1b \n"
+ " .set mips0 \n"
: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
: "r" (incr), "m" (sem->count));
} else if (cpu_has_llsc) {
__asm__ __volatile__(
- "1: ll %0, %2 \n"
+ " .set mips3 \n"
+ "1: ll %0, %2 # __sem_update_count \n"
" sra %1, %0, 31 \n"
" not %1 \n"
" and %1, %0, %1 \n"
- " add %1, %1, %3 \n"
+ " addu %1, %1, %3 \n"
" sc %1, %2 \n"
" beqz %1, 1b \n"
+ " .set mips0 \n"
: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
: "r" (incr), "m" (sem->count));
} else {
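
The ll/sc loop in __sem_update_count() computes new = max(old, 0) + incr (the
sra/not/and sequence zeroes a negative count before the addu) and retries until the
store-conditional succeeds. A portable sketch of the same update using C11 atomics
in place of ll/sc, for illustration only:

#include <stdatomic.h>
#include <stdio.h>

static int sem_update_count(_Atomic int *count, int incr)
{
	int old = atomic_load(count);
	int new;

	do {
		new = (old < 0 ? 0 : old) + incr;	/* mirrors sra/not/and/addu */
	} while (!atomic_compare_exchange_weak(count, &old, new));

	return old;		/* the pre-update value, like the asm's old_count */
}

int main(void)
{
	_Atomic int count = -3;			/* three sleepers recorded */
	int old = sem_update_count(&count, 1);	/* an "up" operation */

	printf("old=%d new=%d\n", old, atomic_load(&count));	/* old=-3 new=1 */
	return 0;
}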
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 12b531c295c4..d86affa21278 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -37,12 +37,13 @@
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
+#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system.h>
-struct cpuinfo_mips cpu_data[NR_CPUS];
+struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_data);
@@ -62,8 +63,8 @@ EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);
*
* These are initialized so they are in the .data section
*/
-unsigned long mips_machtype = MACH_UNKNOWN;
-unsigned long mips_machgroup = MACH_GROUP_UNKNOWN;
+unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
+unsigned long mips_machgroup __read_mostly = MACH_GROUP_UNKNOWN;
EXPORT_SYMBOL(mips_machtype);
EXPORT_SYMBOL(mips_machgroup);
@@ -77,7 +78,7 @@ static char command_line[CL_SIZE];
* mips_io_port_base is the begin of the address space to which x86 style
* I/O ports are mapped.
*/
-const unsigned long mips_io_port_base = -1;
+const unsigned long mips_io_port_base __read_mostly = -1;
EXPORT_SYMBOL(mips_io_port_base);
/*
@@ -510,31 +511,7 @@ static inline void resource_init(void)
#undef MAXMEM
#undef MAXMEM_PFN
-static int __initdata earlyinit_debug;
-
-static int __init earlyinit_debug_setup(char *str)
-{
- earlyinit_debug = 1;
- return 1;
-}
-__setup("earlyinit_debug", earlyinit_debug_setup);
-
-extern initcall_t __earlyinitcall_start, __earlyinitcall_end;
-
-static void __init do_earlyinitcalls(void)
-{
- initcall_t *call, *start, *end;
-
- start = &__earlyinitcall_start;
- end = &__earlyinitcall_end;
-
- for (call = start; call < end; call++) {
- if (earlyinit_debug)
- printk("calling earlyinitcall 0x%p\n", *call);
-
- (*call)();
- }
-}
+extern void plat_setup(void);
void __init setup_arch(char **cmdline_p)
{
@@ -551,7 +528,7 @@ void __init setup_arch(char **cmdline_p)
#endif
/* call board setup routine */
- do_earlyinitcalls();
+ plat_setup();
strlcpy(command_line, arcs_cmdline, sizeof(command_line));
strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
@@ -573,3 +550,12 @@ int __init fpu_disable(char *s)
}
__setup("nofpu", fpu_disable);
+
+int __init dsp_disable(char *s)
+{
+ cpu_data[0].ases &= ~MIPS_ASE_DSP;
+
+ return 1;
+}
+
+__setup("nodsp", dsp_disable);
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index f9234df53253..0f66ae5838b9 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -8,13 +8,14 @@
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
+#include <linux/config.h>
+
static inline int
setup_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
{
int err = 0;
err |= __put_user(regs->cp0_epc, &sc->sc_pc);
- err |= __put_user(regs->cp0_status, &sc->sc_status);
#define save_gp_reg(i) do { \
err |= __put_user(regs->regs[i], &sc->sc_regs[i]); \
@@ -30,10 +31,32 @@ setup_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
save_gp_reg(31);
#undef save_gp_reg
+#ifdef CONFIG_32BIT
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
- err |= __put_user(regs->cp0_cause, &sc->sc_cause);
- err |= __put_user(regs->cp0_badvaddr, &sc->sc_badvaddr);
+ if (cpu_has_dsp) {
+ err |= __put_user(mfhi1(), &sc->sc_hi1);
+ err |= __put_user(mflo1(), &sc->sc_lo1);
+ err |= __put_user(mfhi2(), &sc->sc_hi2);
+ err |= __put_user(mflo2(), &sc->sc_lo2);
+ err |= __put_user(mfhi3(), &sc->sc_hi3);
+ err |= __put_user(mflo3(), &sc->sc_lo3);
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
+ }
+#endif
+#ifdef CONFIG_64BIT
+ err |= __put_user(regs->hi, &sc->sc_hi[0]);
+ err |= __put_user(regs->lo, &sc->sc_lo[0]);
+ if (cpu_has_dsp) {
+ err |= __put_user(mfhi1(), &sc->sc_hi[1]);
+ err |= __put_user(mflo1(), &sc->sc_lo[1]);
+ err |= __put_user(mfhi2(), &sc->sc_hi[2]);
+ err |= __put_user(mflo2(), &sc->sc_lo[2]);
+ err |= __put_user(mfhi3(), &sc->sc_hi[3]);
+ err |= __put_user(mflo3(), &sc->sc_lo[3]);
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
+ }
+#endif
err |= __put_user(!!used_math(), &sc->sc_used_math);
@@ -61,15 +84,40 @@ out:
static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
{
- int err = 0;
unsigned int used_math;
+ unsigned long treg;
+ int err = 0;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
err |= __get_user(regs->cp0_epc, &sc->sc_pc);
+#ifdef CONFIG_32BIT
err |= __get_user(regs->hi, &sc->sc_mdhi);
err |= __get_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
+ err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
+ err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
+ err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
+ err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
+ err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
+ err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
+ }
+#endif
+#ifdef CONFIG_64BIT
+ err |= __get_user(regs->hi, &sc->sc_hi[0]);
+ err |= __get_user(regs->lo, &sc->sc_lo[0]);
+ if (cpu_has_dsp) {
+ err |= __get_user(treg, &sc->sc_hi[1]); mthi1(treg);
+		err |= __get_user(treg, &sc->sc_lo[1]); mtlo1(treg);
+		err |= __get_user(treg, &sc->sc_hi[2]); mthi2(treg);
+		err |= __get_user(treg, &sc->sc_lo[2]); mtlo2(treg);
+		err |= __get_user(treg, &sc->sc_hi[3]); mthi3(treg);
+		err |= __get_user(treg, &sc->sc_lo[3]); mtlo3(treg);
+ err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
+ }
+#endif
#define restore_gp_reg(i) do { \
err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \
@@ -112,7 +160,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
static inline void *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
- unsigned long sp, almask;
+ unsigned long sp;
/* Default to using normal stack */
sp = regs->regs[29];
@@ -128,10 +176,32 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
sp = current->sas_ss_sp + current->sas_ss_size;
- if (PLAT_TRAMPOLINE_STUFF_LINE)
- almask = ~(PLAT_TRAMPOLINE_STUFF_LINE - 1);
- else
- almask = ALMASK;
+ return (void *)((sp - frame_size) & (ICACHE_REFILLS_WORKAROUND_WAR ? 32 : ALMASK));
+}
+
+static inline int install_sigtramp(unsigned int __user *tramp,
+ unsigned int syscall)
+{
+ int err;
+
+ /*
+ * Set up the return code ...
+ *
+ * li v0, __NR__foo_sigreturn
+ * syscall
+ */
+
+ err = __put_user(0x24020000 + syscall, tramp + 0);
+ err |= __put_user(0x0000000c , tramp + 1);
+ if (ICACHE_REFILLS_WORKAROUND_WAR) {
+ err |= __put_user(0, tramp + 2);
+ err |= __put_user(0, tramp + 3);
+ err |= __put_user(0, tramp + 4);
+ err |= __put_user(0, tramp + 5);
+ err |= __put_user(0, tramp + 6);
+ err |= __put_user(0, tramp + 7);
+ }
+ flush_cache_sigtramp((unsigned long) tramp);
- return (void *)((sp - frame_size) & almask);
+ return err;
}
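
install_sigtramp() above stores exactly two instruction words: 0x24020000 + nr
encodes "addiu $v0, $zero, nr" (the canonical li v0, nr) and 0x0000000c is the
syscall instruction. A small decoder demonstrating those encodings, using the o32
sigreturn number implied by the o32 call table earlier in this diff
(sys32_sigreturn sits just before slot 4120):

#include <stdint.h>
#include <stdio.h>

static void decode(uint32_t insn)
{
	uint32_t op = insn >> 26;

	if (op == 0x09)				/* addiu rt, rs, imm */
		printf("%08x: addiu $%u, $%u, %u\n", insn,
		       (insn >> 16) & 31, (insn >> 21) & 31, insn & 0xffff);
	else if (op == 0 && (insn & 0x3f) == 0x0c)
		printf("%08x: syscall\n", insn);
	else
		printf("%08x: (not decoded here)\n", insn);
}

int main(void)
{
	uint32_t nr_sigreturn = 4119;		/* o32 __NR_sigreturn */

	decode(0x24020000 + nr_sigreturn);	/* li v0, __NR_sigreturn */
	decode(0x0000000c);			/* syscall */
	return 0;
}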
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 0209c1dd1429..9202a17db8f7 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -8,6 +8,7 @@
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/config.h>
+#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
@@ -21,6 +22,7 @@
#include <linux/unistd.h>
#include <linux/compiler.h>
+#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
@@ -29,6 +31,7 @@
#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
+#include <asm/war.h>
#include "signal-common.h"
@@ -36,7 +39,7 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-static int do_signal(sigset_t *oldset, struct pt_regs *regs);
+int do_signal(sigset_t *oldset, struct pt_regs *regs);
/*
* Atomically swap in the new signal mask, and wait for a signal.
@@ -47,9 +50,10 @@ save_static_function(sys_sigsuspend);
__attribute_used__ noinline static int
_sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
- sigset_t *uset, saveset, newset;
+ sigset_t saveset, newset;
+ sigset_t __user *uset;
- uset = (sigset_t *) regs.regs[4];
+ uset = (sigset_t __user *) regs.regs[4];
if (copy_from_user(&newset, uset, sizeof(sigset_t)))
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
@@ -75,7 +79,8 @@ save_static_function(sys_rt_sigsuspend);
__attribute_used__ noinline static int
_sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
- sigset_t *unewset, saveset, newset;
+ sigset_t saveset, newset;
+ sigset_t __user *unewset;
size_t sigsetsize;
/* XXX Don't preclude handling different sized sigset_t's. */
@@ -83,7 +88,7 @@ _sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
- unewset = (sigset_t *) regs.regs[4];
+ unewset = (sigset_t __user *) regs.regs[4];
if (copy_from_user(&newset, unewset, sizeof(newset)))
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
@@ -147,33 +152,46 @@ asmlinkage int sys_sigaction(int sig, const struct sigaction *act,
asmlinkage int sys_sigaltstack(nabi_no_regargs struct pt_regs regs)
{
- const stack_t *uss = (const stack_t *) regs.regs[4];
- stack_t *uoss = (stack_t *) regs.regs[5];
+ const stack_t __user *uss = (const stack_t __user *) regs.regs[4];
+ stack_t __user *uoss = (stack_t __user *) regs.regs[5];
unsigned long usp = regs.regs[29];
return do_sigaltstack(uss, uoss, usp);
}
-#if PLAT_TRAMPOLINE_STUFF_LINE
-#define __tramp __attribute__((aligned(PLAT_TRAMPOLINE_STUFF_LINE)))
-#else
-#define __tramp
-#endif
-
+/*
+ * Horribly complicated - with the bloody RM9000 workarounds enabled
+ * the signal trampoline moves to the end of the structure so we can
+ * increase the alignment without breaking software compatibility.
+ */
#ifdef CONFIG_TRAD_SIGNALS
struct sigframe {
u32 sf_ass[4]; /* argument save space for o32 */
- u32 sf_code[2] __tramp; /* signal trampoline */
- struct sigcontext sf_sc __tramp;
+#if ICACHE_REFILLS_WORKAROUND_WAR
+ u32 sf_pad[2];
+#else
+ u32 sf_code[2]; /* signal trampoline */
+#endif
+ struct sigcontext sf_sc;
sigset_t sf_mask;
+#if ICACHE_REFILLS_WORKAROUND_WAR
+ u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */
+#endif
};
#endif
struct rt_sigframe {
u32 rs_ass[4]; /* argument save space for o32 */
- u32 rs_code[2] __tramp; /* signal trampoline */
- struct siginfo rs_info __tramp;
+#if ICACHE_REFILLS_WORKAROUND_WAR
+ u32 rs_pad[2];
+#else
+ u32 rs_code[2]; /* signal trampoline */
+#endif
+ struct siginfo rs_info;
struct ucontext rs_uc;
+#if ICACHE_REFILLS_WORKAROUND_WAR
+ u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */
+#endif
};
#ifdef CONFIG_TRAD_SIGNALS
@@ -214,7 +232,7 @@ _sys_sigreturn(nabi_no_regargs struct pt_regs regs)
badframe:
force_sig(SIGSEGV, current);
}
-#endif
+#endif /* CONFIG_TRAD_SIGNALS */
save_static_function(sys_rt_sigreturn);
__attribute_used__ noinline static void
@@ -260,7 +278,7 @@ badframe:
}
#ifdef CONFIG_TRAD_SIGNALS
-static void inline setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
+int setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
int signr, sigset_t *set)
{
struct sigframe *frame;
@@ -270,17 +288,7 @@ static void inline setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
- /*
- * Set up the return code ...
- *
- * li v0, __NR_sigreturn
- * syscall
- */
- if (PLAT_TRAMPOLINE_STUFF_LINE)
- __clear_user(frame->sf_code, PLAT_TRAMPOLINE_STUFF_LINE);
- err |= __put_user(0x24020000 + __NR_sigreturn, frame->sf_code + 0);
- err |= __put_user(0x0000000c , frame->sf_code + 1);
- flush_cache_sigtramp((unsigned long) frame->sf_code);
+ install_sigtramp(frame->sf_code, __NR_sigreturn);
err |= setup_sigcontext(regs, &frame->sf_sc);
err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
@@ -309,14 +317,15 @@ static void inline setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
current->comm, current->pid,
frame, regs->cp0_epc, frame->regs[31]);
#endif
- return;
+ return 1;
give_sigsegv:
force_sigsegv(signr, current);
+ return 0;
}
#endif
-static void inline setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
+int setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
int signr, sigset_t *set, siginfo_t *info)
{
struct rt_sigframe *frame;
@@ -326,17 +335,7 @@ static void inline setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
- /*
- * Set up the return code ...
- *
- * li v0, __NR_rt_sigreturn
- * syscall
- */
- if (PLAT_TRAMPOLINE_STUFF_LINE)
- __clear_user(frame->rs_code, PLAT_TRAMPOLINE_STUFF_LINE);
- err |= __put_user(0x24020000 + __NR_rt_sigreturn, frame->rs_code + 0);
- err |= __put_user(0x0000000c , frame->rs_code + 1);
- flush_cache_sigtramp((unsigned long) frame->rs_code);
+ install_sigtramp(frame->rs_code, __NR_rt_sigreturn);
/* Create siginfo. */
err |= copy_siginfo_to_user(&frame->rs_info, info);
@@ -378,18 +377,21 @@ static void inline setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
#endif
- return;
+ return 1;
give_sigsegv:
force_sigsegv(signr, current);
+ return 0;
}
extern void setup_rt_frame_n32(struct k_sigaction * ka,
struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info);
-static inline void handle_signal(unsigned long sig, siginfo_t *info,
+static inline int handle_signal(unsigned long sig, siginfo_t *info,
struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs)
{
+ int ret;
+
switch(regs->regs[0]) {
case ERESTART_RESTARTBLOCK:
case ERESTARTNOHAND:
@@ -408,22 +410,10 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info,
regs->regs[0] = 0; /* Don't deal with this again. */
-#ifdef CONFIG_TRAD_SIGNALS
- if (ka->sa.sa_flags & SA_SIGINFO) {
-#else
- if (1) {
-#endif
-#ifdef CONFIG_MIPS32_N32
- if ((current->thread.mflags & MF_ABI_MASK) == MF_N32)
- setup_rt_frame_n32 (ka, regs, sig, oldset, info);
- else
-#endif
- setup_rt_frame(ka, regs, sig, oldset, info);
- }
-#ifdef CONFIG_TRAD_SIGNALS
+ if (sig_uses_siginfo(ka))
+ ret = current->thread.abi->setup_rt_frame(ka, regs, sig, oldset, info);
else
- setup_frame(ka, regs, sig, oldset);
-#endif
+ ret = current->thread.abi->setup_frame(ka, regs, sig, oldset);
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -431,23 +421,16 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info,
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
-}
-extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
-extern int do_irix_signal(sigset_t *oldset, struct pt_regs *regs);
+ return ret;
+}
-static int do_signal(sigset_t *oldset, struct pt_regs *regs)
+int do_signal(sigset_t *oldset, struct pt_regs *regs)
{
struct k_sigaction ka;
siginfo_t info;
int signr;
-#ifdef CONFIG_BINFMT_ELF32
- if ((current->thread.mflags & MF_ABI_MASK) == MF_O32) {
- return do_signal32(oldset, regs);
- }
-#endif
-
/*
* We want the common case to go fast, which is why we may in certain
* cases get here from kernel mode. Just return without doing anything
@@ -463,10 +446,8 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs)
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- if (signr > 0) {
- handle_signal(signr, &info, &ka, oldset, regs);
- return 1;
- }
+ if (signr > 0)
+ return handle_signal(signr, &info, &ka, oldset, regs);
no_signal:
/*
@@ -499,18 +480,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
{
/* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING) {
-#ifdef CONFIG_BINFMT_ELF32
- if (likely((current->thread.mflags & MF_ABI_MASK) == MF_O32)) {
- do_signal32(oldset, regs);
- return;
- }
-#endif
-#ifdef CONFIG_BINFMT_IRIX
- if (unlikely(current->personality != PER_LINUX)) {
- do_irix_signal(oldset, regs);
- return;
- }
-#endif
- do_signal(oldset, regs);
+ current->thread.abi->do_signal(oldset, regs);
}
}
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 8ddfbd8d425a..dbe821303125 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -7,6 +7,7 @@
* Copyright (C) 1994 - 2000 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
+#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
@@ -21,6 +22,7 @@
#include <linux/suspend.h>
#include <linux/compiler.h>
+#include <asm/abi.h>
#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
@@ -29,6 +31,7 @@
#include <asm/ucontext.h>
#include <asm/system.h>
#include <asm/fpu.h>
+#include <asm/war.h>
#define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3)
@@ -76,8 +79,10 @@ typedef struct compat_siginfo {
/* POSIX.1b timers */
struct {
- unsigned int _timer1;
- unsigned int _timer2;
+ timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ compat_sigval_t _sigval;/* same as below */
+ int _sys_private; /* not to be passed to user */
} _timer;
/* POSIX.1b signals */
@@ -259,11 +264,12 @@ asmlinkage int sys32_sigaction(int sig, const struct sigaction32 *act,
if (act) {
old_sigset_t mask;
+ s32 handler;
if (!access_ok(VERIFY_READ, act, sizeof(*act)))
return -EFAULT;
- err |= __get_user((u32)(u64)new_ka.sa.sa_handler,
- &act->sa_handler);
+ err |= __get_user(handler, &act->sa_handler);
+ new_ka.sa.sa_handler = (void*)(s64)handler;
err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
err |= __get_user(mask, &act->sa_mask.sig[0]);
if (err)
@@ -331,8 +337,9 @@ asmlinkage int sys32_sigaltstack(nabi_no_regargs struct pt_regs regs)
static int restore_sigcontext32(struct pt_regs *regs, struct sigcontext32 *sc)
{
+ u32 used_math;
int err = 0;
- __u32 used_math;
+ s32 treg;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
@@ -340,6 +347,15 @@ static int restore_sigcontext32(struct pt_regs *regs, struct sigcontext32 *sc)
err |= __get_user(regs->cp0_epc, &sc->sc_pc);
err |= __get_user(regs->hi, &sc->sc_mdhi);
err |= __get_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
+ err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
+ err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
+ err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
+ err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
+ err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
+ err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
+ }
#define restore_gp_reg(i) do { \
err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \
@@ -378,16 +394,30 @@ static int restore_sigcontext32(struct pt_regs *regs, struct sigcontext32 *sc)
struct sigframe {
u32 sf_ass[4]; /* argument save space for o32 */
+#if ICACHE_REFILLS_WORKAROUND_WAR
+ u32 sf_pad[2];
+#else
u32 sf_code[2]; /* signal trampoline */
+#endif
struct sigcontext32 sf_sc;
sigset_t sf_mask;
+#if ICACHE_REFILLS_WORKAROUND_WAR
+ u32 sf_code[8] ____cacheline_aligned; /* signal trampoline */
+#endif
};
struct rt_sigframe32 {
u32 rs_ass[4]; /* argument save space for o32 */
+#if ICACHE_REFILLS_WORKAROUND_WAR
+ u32 rs_pad[2];
+#else
u32 rs_code[2]; /* signal trampoline */
+#endif
compat_siginfo_t rs_info;
struct ucontext32 rs_uc;
+#if ICACHE_REFILLS_WORKAROUND_WAR
+ u32 rs_code[8] __attribute__((aligned(32))); /* signal trampoline */
+#endif
};
int copy_siginfo_to_user32(compat_siginfo_t *to, siginfo_t *from)
@@ -411,6 +441,11 @@ int copy_siginfo_to_user32(compat_siginfo_t *to, siginfo_t *from)
err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
else {
switch (from->si_code >> 16) {
+ case __SI_TIMER >> 16:
+ err |= __put_user(from->si_tid, &to->si_tid);
+ err |= __put_user(from->si_overrun, &to->si_overrun);
+ err |= __put_user(from->si_int, &to->si_int);
+ break;
case __SI_CHLD >> 16:
err |= __put_user(from->si_utime, &to->si_utime);
err |= __put_user(from->si_stime, &to->si_stime);
@@ -480,6 +515,7 @@ __attribute_used__ noinline static void
_sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
struct rt_sigframe32 *frame;
+ mm_segment_t old_fs;
sigset_t set;
stack_t st;
s32 sp;
@@ -510,7 +546,10 @@ _sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
+ old_fs = get_fs();
+ set_fs (KERNEL_DS);
do_sigaltstack(&st, NULL, regs.regs[29]);
+ set_fs (old_fs);
/*
* Don't let your children do this ...
@@ -550,8 +589,15 @@ static inline int setup_sigcontext32(struct pt_regs *regs,
err |= __put_user(regs->hi, &sc->sc_mdhi);
err |= __put_user(regs->lo, &sc->sc_mdlo);
- err |= __put_user(regs->cp0_cause, &sc->sc_cause);
- err |= __put_user(regs->cp0_badvaddr, &sc->sc_badvaddr);
+ if (cpu_has_dsp) {
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
+ err |= __put_user(mfhi1(), &sc->sc_hi1);
+ err |= __put_user(mflo1(), &sc->sc_lo1);
+ err |= __put_user(mfhi2(), &sc->sc_hi2);
+ err |= __put_user(mflo2(), &sc->sc_lo2);
+ err |= __put_user(mfhi3(), &sc->sc_hi3);
+ err |= __put_user(mflo3(), &sc->sc_lo3);
+ }
err |= __put_user(!!used_math(), &sc->sc_used_math);
@@ -601,7 +647,7 @@ static inline void *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
return (void *)((sp - frame_size) & ALMASK);
}
-static inline void setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
+void setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
int signr, sigset_t *set)
{
struct sigframe *frame;
@@ -654,9 +700,7 @@ give_sigsegv:
force_sigsegv(signr, current);
}
-static inline void setup_rt_frame(struct k_sigaction * ka,
- struct pt_regs *regs, int signr,
- sigset_t *set, siginfo_t *info)
+void setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info)
{
struct rt_sigframe32 *frame;
int err = 0;
@@ -725,9 +769,11 @@ give_sigsegv:
force_sigsegv(signr, current);
}
-static inline void handle_signal(unsigned long sig, siginfo_t *info,
+static inline int handle_signal(unsigned long sig, siginfo_t *info,
struct k_sigaction *ka, sigset_t *oldset, struct pt_regs * regs)
{
+ int ret;
+
switch (regs->regs[0]) {
case ERESTART_RESTARTBLOCK:
case ERESTARTNOHAND:
@@ -747,9 +793,9 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info,
regs->regs[0] = 0; /* Don't deal with this again. */
if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame(ka, regs, sig, oldset, info);
+ ret = current->thread.abi->setup_rt_frame(ka, regs, sig, oldset, info);
else
- setup_frame(ka, regs, sig, oldset);
+ ret = current->thread.abi->setup_frame(ka, regs, sig, oldset);
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
@@ -757,6 +803,8 @@ static inline void handle_signal(unsigned long sig, siginfo_t *info,
sigaddset(&current->blocked,sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
+
+ return ret;
}
int do_signal32(sigset_t *oldset, struct pt_regs *regs)
@@ -780,10 +828,8 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- if (signr > 0) {
- handle_signal(signr, &info, &ka, oldset, regs);
- return 1;
- }
+ if (signr > 0)
+ return handle_signal(signr, &info, &ka, oldset, regs);
no_signal:
/*
@@ -819,12 +865,13 @@ asmlinkage int sys32_rt_sigaction(int sig, const struct sigaction32 *act,
goto out;
if (act) {
+ s32 handler;
int err = 0;
if (!access_ok(VERIFY_READ, act, sizeof(*act)))
return -EFAULT;
- err |= __get_user((u32)(u64)new_sa.sa.sa_handler,
- &act->sa_handler);
+ err |= __get_user(handler, &act->sa_handler);
+ new_sa.sa.sa_handler = (void*)(s64)handler;
err |= __get_user(new_sa.sa.sa_flags, &act->sa_flags);
err |= get_sigset(&new_sa.sa.sa_mask, &act->sa_mask);
if (err)
@@ -902,3 +949,30 @@ asmlinkage int sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t *uinfo)
set_fs (old_fs);
return ret;
}
+
+asmlinkage long
+sys32_waitid(int which, compat_pid_t pid,
+ compat_siginfo_t __user *uinfo, int options,
+ struct compat_rusage __user *uru)
+{
+ siginfo_t info;
+ struct rusage ru;
+ long ret;
+ mm_segment_t old_fs = get_fs();
+
+ info.si_signo = 0;
+ set_fs (KERNEL_DS);
+ ret = sys_waitid(which, pid, (siginfo_t __user *) &info, options,
+ uru ? (struct rusage __user *) &ru : NULL);
+ set_fs (old_fs);
+
+ if (ret < 0 || info.si_signo == 0)
+ return ret;
+
+ if (uru && (ret = put_compat_rusage(&ru, uru)))
+ return ret;
+
+ BUG_ON(info.si_code & __SI_MASK);
+ info.si_code |= __SI_CHLD;
+ return copy_siginfo_to_user32(uinfo, &info);
+}
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index 3544208d4b4b..ec61b2670ba6 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -15,6 +15,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
@@ -36,6 +38,7 @@
#include <asm/system.h>
#include <asm/fpu.h>
#include <asm/cpu-features.h>
+#include <asm/war.h>
#include "signal-common.h"
@@ -62,17 +65,18 @@ struct ucontextn32 {
sigset_t uc_sigmask; /* mask last for extensibility */
};
-#if PLAT_TRAMPOLINE_STUFF_LINE
-#define __tramp __attribute__((aligned(PLAT_TRAMPOLINE_STUFF_LINE)))
-#else
-#define __tramp
-#endif
-
struct rt_sigframe_n32 {
u32 rs_ass[4]; /* argument save space for o32 */
- u32 rs_code[2] __tramp; /* signal trampoline */
- struct siginfo rs_info __tramp;
+#if ICACHE_REFILLS_WORKAROUND_WAR
+ u32 rs_pad[2];
+#else
+ u32 rs_code[2]; /* signal trampoline */
+#endif
+ struct siginfo rs_info;
struct ucontextn32 rs_uc;
+#if ICACHE_REFILLS_WORKAROUND_WAR
+ u32 rs_code[8] ____cacheline_aligned; /* signal trampoline */
+#endif
};
save_static_function(sysn32_rt_sigreturn);
@@ -126,7 +130,7 @@ badframe:
force_sig(SIGSEGV, current);
}
-void setup_rt_frame_n32(struct k_sigaction * ka,
+int setup_rt_frame_n32(struct k_sigaction * ka,
struct pt_regs *regs, int signr, sigset_t *set, siginfo_t *info)
{
struct rt_sigframe_n32 *frame;
@@ -137,17 +141,7 @@ void setup_rt_frame_n32(struct k_sigaction * ka,
if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
goto give_sigsegv;
- /*
- * Set up the return code ...
- *
- * li v0, __NR_rt_sigreturn
- * syscall
- */
- if (PLAT_TRAMPOLINE_STUFF_LINE)
- __clear_user(frame->rs_code, PLAT_TRAMPOLINE_STUFF_LINE);
- err |= __put_user(0x24020000 + __NR_N32_rt_sigreturn, frame->rs_code + 0);
- err |= __put_user(0x0000000c , frame->rs_code + 1);
- flush_cache_sigtramp((unsigned long) frame->rs_code);
+ install_sigtramp(frame->rs_code, __NR_N32_rt_sigreturn);
/* Create siginfo. */
err |= copy_siginfo_to_user(&frame->rs_info, info);
@@ -190,8 +184,9 @@ void setup_rt_frame_n32(struct k_sigaction * ka,
current->comm, current->pid,
frame, regs->cp0_epc, regs->regs[31]);
#endif
- return;
+ return 1;
give_sigsegv:
force_sigsegv(signr, current);
+ return 0;
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index af5cd3b8a396..fcacf1aae98a 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -50,7 +50,6 @@ static void smp_tune_scheduling (void)
{
struct cache_desc *cd = &current_cpu_data.scache;
unsigned long cachesize; /* kB */
- unsigned long bandwidth = 350; /* MB/s */
unsigned long cpu_khz;
/*
@@ -121,7 +120,19 @@ struct call_data_struct *call_data;
* or are or have executed.
*
* You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
+ * hardware interrupt handler or from a bottom half handler:
+ *
+ * CPU A CPU B
+ * Disable interrupts
+ * smp_call_function()
+ * Take call_lock
+ * Send IPIs
+ * Wait for all cpus to acknowledge IPI
+ * CPU A has not responded, spin waiting
+ * for cpu A to respond, holding call_lock
+ * smp_call_function()
+ * Spin waiting for call_lock
+ * Deadlock Deadlock
*/
int smp_call_function (void (*func) (void *info), void *info, int retry,
int wait)
@@ -130,6 +141,11 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
int i, cpus = num_online_cpus() - 1;
int cpu = smp_processor_id();
+ /*
+ * Can die spectacularly if this CPU isn't yet marked online
+ */
+ BUG_ON(!cpu_online(cpu));
+
if (!cpus)
return 0;
@@ -214,7 +230,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- cpu_data[0].udelay_val = loops_per_jiffy;
init_new_context(current, &init_mm);
current_thread_info()->cpu = 0;
smp_tune_scheduling();
@@ -236,23 +251,28 @@ void __devinit smp_prepare_boot_cpu(void)
}
/*
- * Startup the CPU with this logical number
+ * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu
+ * and keep control until "cpu_online(cpu)" is set. Note: cpu is
+ * physical, not logical.
*/
-static int __init do_boot_cpu(int cpu)
+int __devinit __cpu_up(unsigned int cpu)
{
struct task_struct *idle;
/*
+ * Processor goes to start_secondary(), sets online flag
* The following code is purely to make sure
* Linux can schedule processes on this slave.
*/
idle = fork_idle(cpu);
if (IS_ERR(idle))
- panic("failed fork for CPU %d\n", cpu);
+ panic("Fork failed for CPU %d", cpu);
prom_boot_secondary(cpu, idle);
- /* XXXKW timeout */
+ /*
+ * Trust is futile. We should really have timeouts ...
+ */
while (!cpu_isset(cpu, cpu_callin_map))
udelay(100);
@@ -261,23 +281,6 @@ static int __init do_boot_cpu(int cpu)
return 0;
}
-/*
- * Called once for each "cpu_possible(cpu)". Needs to spin up the cpu
- * and keep control until "cpu_online(cpu)" is set. Note: cpu is
- * physical, not logical.
- */
-int __devinit __cpu_up(unsigned int cpu)
-{
- int ret;
-
- /* Processor goes to start_secondary(), sets online flag */
- ret = do_boot_cpu(cpu);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
diff --git a/arch/mips/kernel/smp_mt.c b/arch/mips/kernel/smp_mt.c
new file mode 100644
index 000000000000..d429544ba4bc
--- /dev/null
+++ b/arch/mips/kernel/smp_mt.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
+ *
+ * Elizabeth Clarke (beth@mips.com)
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+
+#include <asm/atomic.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/time.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/cacheflush.h>
+#include <asm/mips-boards/maltaint.h>
+
+#define MIPS_CPU_IPI_RESCHED_IRQ 0
+#define MIPS_CPU_IPI_CALL_IRQ 1
+
+static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
+
+#if 0
+static void dump_mtregisters(int vpe, int tc)
+{
+ printk("vpe %d tc %d\n", vpe, tc);
+
+ settc(tc);
+
+ printk(" c0 status 0x%lx\n", read_vpe_c0_status());
+ printk(" vpecontrol 0x%lx\n", read_vpe_c0_vpecontrol());
+ printk(" vpeconf0 0x%lx\n", read_vpe_c0_vpeconf0());
+ printk(" tcstatus 0x%lx\n", read_tc_c0_tcstatus());
+ printk(" tcrestart 0x%lx\n", read_tc_c0_tcrestart());
+ printk(" tcbind 0x%lx\n", read_tc_c0_tcbind());
+ printk(" tchalt 0x%lx\n", read_tc_c0_tchalt());
+}
+#endif
+
+void __init sanitize_tlb_entries(void)
+{
+ int i, tlbsiz;
+ unsigned long mvpconf0, ncpu;
+
+ if (!cpu_has_mipsmt)
+ return;
+
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ /* Disable TLB sharing */
+ clear_c0_mvpcontrol(MVPCONTROL_STLB);
+
+ mvpconf0 = read_c0_mvpconf0();
+
+ printk(KERN_INFO "MVPConf0 0x%lx TLBS %lx PTLBE %ld\n", mvpconf0,
+ (mvpconf0 & MVPCONF0_TLBS) >> MVPCONF0_TLBS_SHIFT,
+ (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT);
+
+ tlbsiz = (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT;
+ ncpu = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+
+ printk(" tlbsiz %d ncpu %ld\n", tlbsiz, ncpu);
+
+ if (tlbsiz > 0) {
+ /* share them out across the VPEs */
+ tlbsiz /= ncpu;
+
+ printk(KERN_INFO "setting Config1.MMU_size to %d\n", tlbsiz);
+
+ for (i = 0; i < ncpu; i++) {
+ settc(i);
+
+ if (i == 0)
+ write_c0_config1((read_c0_config1() & ~(0x3f << 25)) | (tlbsiz << 25));
+ else
+ write_vpe_c0_config1((read_vpe_c0_config1() & ~(0x3f << 25)) |
+ (tlbsiz << 25));
+ }
+ }
+
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+}
+
+#if 0
+/*
+ * Use c0_MVPConf0 to find out how many CPUs are available, setting up
+ * phys_cpu_present_map and the logical/physical mappings.
+ */
+void __init prom_build_cpu_map(void)
+{
+ int i, num, ncpus;
+
+ cpus_clear(phys_cpu_present_map);
+
+ /* assume we boot on cpu 0.... */
+ cpu_set(0, phys_cpu_present_map);
+ __cpu_number_map[0] = 0;
+ __cpu_logical_map[0] = 0;
+
+ if (cpu_has_mipsmt) {
+ ncpus = ((read_c0_mvpconf0() & (MVPCONF0_PVPE)) >> MVPCONF0_PVPE_SHIFT) + 1;
+ for (i=1, num=0; i< NR_CPUS && i<ncpus; i++) {
+ cpu_set(i, phys_cpu_present_map);
+ __cpu_number_map[i] = ++num;
+ __cpu_logical_map[num] = i;
+ }
+
+ printk(KERN_INFO "%i available secondary CPU(s)\n", num);
+ }
+}
+#endif
+
+static void ipi_resched_dispatch (struct pt_regs *regs)
+{
+ do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ, regs);
+}
+
+static void ipi_call_dispatch (struct pt_regs *regs)
+{
+ do_IRQ(MIPS_CPU_IPI_CALL_IRQ, regs);
+}
+
+irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ return IRQ_HANDLED;
+}
+
+irqreturn_t ipi_call_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ smp_call_function_interrupt();
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction irq_resched = {
+ .handler = ipi_resched_interrupt,
+ .flags = SA_INTERRUPT,
+ .name = "IPI_resched"
+};
+
+static struct irqaction irq_call = {
+ .handler = ipi_call_interrupt,
+ .flags = SA_INTERRUPT,
+ .name = "IPI_call"
+};
+
+/*
+ * Common setup before any secondaries are started
+ * Make sure all CPUs are in a sensible state before we boot any of the
+ * secondaries.
+ */
+void prom_prepare_cpus(unsigned int max_cpus)
+{
+ unsigned long val;
+ int i, num;
+
+ if (!cpu_has_mipsmt)
+ return;
+
+ /* disable MT so we can configure */
+ dvpe();
+ dmt();
+
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ val = read_c0_mvpconf0();
+
+ /* we'll always have more TCs than VPEs, so loop setting everything
+ to a sensible state */
+ for (i = 0, num = 0; i <= ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT); i++) {
+ settc(i);
+
+ /* VPEs */
+ if (i <= ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) {
+
+ /* deactivate all but vpe0 */
+ if (i != 0) {
+ unsigned long tmp = read_vpe_c0_vpeconf0();
+
+ tmp &= ~VPECONF0_VPA;
+
+ /* master VPE */
+ tmp |= VPECONF0_MVP;
+ write_vpe_c0_vpeconf0(tmp);
+
+ /* Record this as available CPU */
+ if (i < max_cpus) {
+ cpu_set(i, phys_cpu_present_map);
+ __cpu_number_map[i] = ++num;
+ __cpu_logical_map[num] = i;
+ }
+ }
+
+ /* disable multi-threading with TCs */
+ write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
+
+ if (i != 0) {
+ write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
+ write_vpe_c0_cause(read_vpe_c0_cause() & ~CAUSEF_IP);
+
+ /* set config to be the same as vpe0, particularly kseg0 coherency alg */
+ write_vpe_c0_config( read_c0_config());
+ }
+
+ }
+
+ /* TCs */
+
+ if (i != 0) {
+ unsigned long tmp;
+
+ /* bind a TC to each VPE; may as well put all excess TCs
+ on the last VPE */
+ if ( i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1) )
+ write_tc_c0_tcbind(read_tc_c0_tcbind() | ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) );
+ else {
+ write_tc_c0_tcbind( read_tc_c0_tcbind() | i);
+
+ /* and set XTC */
+ write_vpe_c0_vpeconf0( read_vpe_c0_vpeconf0() | (i << VPECONF0_XTC_SHIFT));
+ }
+
+ tmp = read_tc_c0_tcstatus();
+
+ /* mark not allocated and not dynamically allocatable */
+ tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+ tmp |= TCSTATUS_IXMT; /* interrupt exempt */
+ write_tc_c0_tcstatus(tmp);
+
+ write_tc_c0_tchalt(TCHALT_H);
+ }
+ }
+
+ /* Release config state */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ /* We'll wait until starting the secondaries before starting MVPE */
+
+ printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
+
+ /* set up ipi interrupts */
+ if (cpu_has_vint) {
+ set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
+ set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
+ }
+
+ cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
+ cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;
+
+ setup_irq(cpu_ipi_resched_irq, &irq_resched);
+ setup_irq(cpu_ipi_call_irq, &irq_call);
+
+ /* need to mark IPI's as IRQ_PER_CPU */
+ irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU;
+ irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU;
+}
+
+/*
+ * Setup the PC, SP, and GP of a secondary processor and start it
+ * running!
+ * smp_bootstrap is the place to resume from
+ * __KSTK_TOS(idle) is apparently the stack pointer
+ * (unsigned long)idle->thread_info the gp
+ * assumes a 1:1 mapping of TC => VPE
+ */
+void prom_boot_secondary(int cpu, struct task_struct *idle)
+{
+ dvpe();
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ settc(cpu);
+
+ /* restart */
+ write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
+
+ /* enable the tc this vpe/cpu will be running */
+ write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A);
+
+ write_tc_c0_tchalt(0);
+
+ /* enable the VPE */
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
+
+ /* stack pointer */
+ write_tc_gpr_sp( __KSTK_TOS(idle));
+
+ /* global pointer */
+ write_tc_gpr_gp((unsigned long)idle->thread_info);
+
+ flush_icache_range((unsigned long)idle->thread_info,
+ (unsigned long)idle->thread_info +
+ sizeof(struct thread_info));
+
+ /* finally out of configuration and into chaos */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ evpe(EVPE_ENABLE);
+}
+
+void prom_init_secondary(void)
+{
+ write_c0_status((read_c0_status() & ~ST0_IM ) |
+ (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7));
+}
+
+void prom_smp_finish(void)
+{
+ write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
+
+ local_irq_enable();
+}
+
+void prom_cpus_done(void)
+{
+}
+
+void core_send_ipi(int cpu, unsigned int action)
+{
+ int i;
+ unsigned long flags;
+ int vpflags;
+
+ local_irq_save (flags);
+
+ vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */
+
+ switch (action) {
+ case SMP_CALL_FUNCTION:
+ i = C_SW1;
+ break;
+
+ case SMP_RESCHEDULE_YOURSELF:
+ default:
+ i = C_SW0;
+ break;
+ }
+
+ /* 1:1 mapping of vpe and tc... */
+ settc(cpu);
+ write_vpe_c0_cause(read_vpe_c0_cause() | i);
+ evpe(vpflags);
+
+ local_irq_restore(flags);
+}
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 21e3e13a4b44..ee98eeb65e85 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -7,6 +7,7 @@
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2001 MIPS Technologies, Inc.
*/
+#include <linux/config.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/linkage.h>
@@ -26,6 +27,7 @@
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/compiler.h>
+#include <linux/module.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
@@ -56,6 +58,8 @@ out:
unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
+EXPORT_SYMBOL(shm_align_mask);
+
#define COLOUR_ALIGN(addr,pgoff) \
((((addr) + shm_align_mask) & ~shm_align_mask) + \
(((pgoff) << PAGE_SHIFT) & shm_align_mask))
@@ -173,14 +177,28 @@ _sys_clone(nabi_no_regargs struct pt_regs regs)
{
unsigned long clone_flags;
unsigned long newsp;
- int *parent_tidptr, *child_tidptr;
+ int __user *parent_tidptr, *child_tidptr;
clone_flags = regs.regs[4];
newsp = regs.regs[5];
if (!newsp)
newsp = regs.regs[29];
- parent_tidptr = (int *) regs.regs[6];
- child_tidptr = (int *) regs.regs[7];
+ parent_tidptr = (int __user *) regs.regs[6];
+#ifdef CONFIG_32BIT
+ /* We need to fetch the fifth argument off the stack. */
+ child_tidptr = NULL;
+ if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) {
+ int __user *__user *usp = (int __user *__user *) regs.regs[29];
+ if (regs.regs[2] == __NR_syscall) {
+ if (get_user (child_tidptr, &usp[5]))
+ return -EFAULT;
+ }
+ else if (get_user (child_tidptr, &usp[4]))
+ return -EFAULT;
+ }
+#else
+ child_tidptr = (int __user *) regs.regs[8];
+#endif
return do_fork(clone_flags, newsp, &regs, 0,
parent_tidptr, child_tidptr);
}
@@ -242,6 +260,16 @@ asmlinkage int sys_olduname(struct oldold_utsname * name)
return error;
}
+void sys_set_thread_area(unsigned long addr)
+{
+ struct thread_info *ti = current->thread_info;
+
+ ti->tp_value = addr;
+
+ /* If some future MIPS implementation has this register in hardware,
+ * we will need to update it here (and in context switches). */
+}
+
asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
{
int tmp, len;
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 7ae4af476974..52924f8ce23c 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -73,32 +73,30 @@ asmlinkage int irix_sysmp(struct pt_regs *regs)
}
/* The prctl commands. */
-#define PR_MAXPROCS 1 /* Tasks/user. */
-#define PR_ISBLOCKED 2 /* If blocked, return 1. */
-#define PR_SETSTACKSIZE 3 /* Set largest task stack size. */
-#define PR_GETSTACKSIZE 4 /* Get largest task stack size. */
-#define PR_MAXPPROCS 5 /* Num parallel tasks. */
-#define PR_UNBLKONEXEC 6 /* When task exec/exit's, unblock. */
-#define PR_SETEXITSIG 8 /* When task exit's, set signal. */
-#define PR_RESIDENT 9 /* Make task unswappable. */
-#define PR_ATTACHADDR 10 /* (Re-)Connect a vma to a task. */
-#define PR_DETACHADDR 11 /* Disconnect a vma from a task. */
-#define PR_TERMCHILD 12 /* When parent sleeps with fishes, kill child. */
-#define PR_GETSHMASK 13 /* Get the sproc() share mask. */
-#define PR_GETNSHARE 14 /* Number of share group members. */
-#define PR_COREPID 15 /* Add task pid to name when it core. */
-#define PR_ATTACHADDRPERM 16 /* (Re-)Connect vma, with specified prot. */
-#define PR_PTHREADEXIT 17 /* Kill a pthread without prejudice. */
-
-asmlinkage int irix_prctl(struct pt_regs *regs)
-{
- unsigned long cmd;
- int error = 0, base = 0;
+#define PR_MAXPROCS 1 /* Tasks/user. */
+#define PR_ISBLOCKED 2 /* If blocked, return 1. */
+#define PR_SETSTACKSIZE 3 /* Set largest task stack size. */
+#define PR_GETSTACKSIZE 4 /* Get largest task stack size. */
+#define PR_MAXPPROCS 5 /* Num parallel tasks. */
+#define PR_UNBLKONEXEC 6 /* When task exec/exit's, unblock. */
+#define PR_SETEXITSIG 8 /* When task exit's, set signal. */
+#define PR_RESIDENT 9 /* Make task unswappable. */
+#define PR_ATTACHADDR 10 /* (Re-)Connect a vma to a task. */
+#define PR_DETACHADDR 11 /* Disconnect a vma from a task. */
+#define PR_TERMCHILD 12 /* Kill child if the parent dies. */
+#define PR_GETSHMASK 13 /* Get the sproc() share mask. */
+#define PR_GETNSHARE 14 /* Number of share group members. */
+#define PR_COREPID 15 /* Add task pid to name when it core. */
+#define PR_ATTACHADDRPERM 16 /* (Re-)Connect vma, with specified prot. */
+#define PR_PTHREADEXIT 17 /* Kill a pthread, only for IRIX 6.[234] */
+
+asmlinkage int irix_prctl(unsigned option, ...)
+{
+ va_list args;
+ int error = 0;
- if (regs->regs[2] == 1000)
- base = 1;
- cmd = regs->regs[base + 4];
- switch (cmd) {
+ va_start(args, option);
+ switch (option) {
case PR_MAXPROCS:
printk("irix_prctl[%s:%d]: Wants PR_MAXPROCS\n",
current->comm, current->pid);
@@ -111,7 +109,7 @@ asmlinkage int irix_prctl(struct pt_regs *regs)
printk("irix_prctl[%s:%d]: Wants PR_ISBLOCKED\n",
current->comm, current->pid);
read_lock(&tasklist_lock);
- task = find_task_by_pid(regs->regs[base + 5]);
+ task = find_task_by_pid(va_arg(args, pid_t));
error = -ESRCH;
if (error)
error = (task->run_list.next != NULL);
@@ -121,7 +119,7 @@ asmlinkage int irix_prctl(struct pt_regs *regs)
}
case PR_SETSTACKSIZE: {
- long value = regs->regs[base + 5];
+ long value = va_arg(args, long);
printk("irix_prctl[%s:%d]: Wants PR_SETSTACKSIZE<%08lx>\n",
current->comm, current->pid, (unsigned long) value);
@@ -222,24 +220,20 @@ asmlinkage int irix_prctl(struct pt_regs *regs)
error = -EINVAL;
break;
- case PR_PTHREADEXIT:
- printk("irix_prctl[%s:%d]: Wants PR_PTHREADEXIT\n",
- current->comm, current->pid);
- do_exit(regs->regs[base + 5]);
-
default:
printk("irix_prctl[%s:%d]: Non-existant opcode %d\n",
- current->comm, current->pid, (int)cmd);
+ current->comm, current->pid, option);
error = -EINVAL;
break;
}
+ va_end(args);
return error;
}
#undef DEBUG_PROCGRPS
-extern unsigned long irix_mapelf(int fd, struct elf_phdr *user_phdrp, int cnt);
+extern unsigned long irix_mapelf(int fd, struct elf_phdr __user *user_phdrp, int cnt);
extern int getrusage(struct task_struct *p, int who, struct rusage __user *ru);
extern char *prom_getenv(char *name);
extern long prom_setenv(char *name, char *value);
@@ -276,23 +270,19 @@ asmlinkage int irix_syssgi(struct pt_regs *regs)
cmd = regs->regs[base + 4];
switch(cmd) {
case SGI_SYSID: {
- char *buf = (char *) regs->regs[base + 5];
+ char __user *buf = (char __user *) regs->regs[base + 5];
/* XXX Use ethernet addr.... */
- retval = clear_user(buf, 64);
+ retval = clear_user(buf, 64) ? -EFAULT : 0;
break;
}
#if 0
case SGI_RDNAME: {
int pid = (int) regs->regs[base + 5];
- char *buf = (char *) regs->regs[base + 6];
+ char __user *buf = (char __user *) regs->regs[base + 6];
struct task_struct *p;
char tcomm[sizeof(current->comm)];
- if (!access_ok(VERIFY_WRITE, buf, sizeof(tcomm))) {
- retval = -EFAULT;
- break;
- }
read_lock(&tasklist_lock);
p = find_task_by_pid(pid);
if (!p) {
@@ -304,34 +294,28 @@ asmlinkage int irix_syssgi(struct pt_regs *regs)
read_unlock(&tasklist_lock);
/* XXX Need to check sizes. */
- copy_to_user(buf, tcomm, sizeof(tcomm));
- retval = 0;
+ retval = copy_to_user(buf, tcomm, sizeof(tcomm)) ? -EFAULT : 0;
break;
}
case SGI_GETNVRAM: {
- char *name = (char *) regs->regs[base+5];
- char *buf = (char *) regs->regs[base+6];
+ char __user *name = (char __user *) regs->regs[base+5];
+ char __user *buf = (char __user *) regs->regs[base+6];
char *value;
return -EINVAL; /* til I fix it */
- if (!access_ok(VERIFY_WRITE, buf, 128)) {
- retval = -EFAULT;
- break;
- }
value = prom_getenv(name); /* PROM lock? */
if (!value) {
retval = -EINVAL;
break;
}
/* Do I strlen() for the length? */
- copy_to_user(buf, value, 128);
- retval = 0;
+ retval = copy_to_user(buf, value, 128) ? -EFAULT : 0;
break;
}
case SGI_SETNVRAM: {
- char *name = (char *) regs->regs[base+5];
- char *value = (char *) regs->regs[base+6];
+ char __user *name = (char __user *) regs->regs[base+5];
+ char __user *value = (char __user *) regs->regs[base+6];
return -EINVAL; /* til I fix it */
retval = prom_setenv(name, value);
/* XXX make sure retval conforms to syssgi(2) */
@@ -407,16 +391,16 @@ asmlinkage int irix_syssgi(struct pt_regs *regs)
case SGI_SETGROUPS:
retval = sys_setgroups((int) regs->regs[base + 5],
- (gid_t *) regs->regs[base + 6]);
+ (gid_t __user *) regs->regs[base + 6]);
break;
case SGI_GETGROUPS:
retval = sys_getgroups((int) regs->regs[base + 5],
- (gid_t *) regs->regs[base + 6]);
+ (gid_t __user *) regs->regs[base + 6]);
break;
case SGI_RUSAGE: {
- struct rusage *ru = (struct rusage *) regs->regs[base + 6];
+ struct rusage __user *ru = (struct rusage __user *) regs->regs[base + 6];
switch((int) regs->regs[base + 5]) {
case 0:
@@ -453,7 +437,7 @@ asmlinkage int irix_syssgi(struct pt_regs *regs)
case SGI_ELFMAP:
retval = irix_mapelf((int) regs->regs[base + 5],
- (struct elf_phdr *) regs->regs[base + 6],
+ (struct elf_phdr __user *) regs->regs[base + 6],
(int) regs->regs[base + 7]);
break;
@@ -468,24 +452,24 @@ asmlinkage int irix_syssgi(struct pt_regs *regs)
case SGI_PHYSP: {
unsigned long addr = regs->regs[base + 5];
- int *pageno = (int *) (regs->regs[base + 6]);
+ int __user *pageno = (int __user *) (regs->regs[base + 6]);
struct mm_struct *mm = current->mm;
pgd_t *pgdp;
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
- if (!access_ok(VERIFY_WRITE, pageno, sizeof(int)))
- return -EFAULT;
-
down_read(&mm->mmap_sem);
pgdp = pgd_offset(mm, addr);
- pmdp = pmd_offset(pgdp, addr);
+ pudp = pud_offset(pgdp, addr);
+ pmdp = pmd_offset(pudp, addr);
ptep = pte_offset(pmdp, addr);
retval = -EINVAL;
if (ptep) {
pte_t pte = *ptep;
if (pte_val(pte) & (_PAGE_VALID | _PAGE_PRESENT)) {
+ /* b0rked on 64-bit */
retval = put_user((pte_val(pte) & PAGE_MASK) >>
PAGE_SHIFT, pageno);
}
@@ -496,7 +480,7 @@ asmlinkage int irix_syssgi(struct pt_regs *regs)
case SGI_INVENT: {
int arg1 = (int) regs->regs [base + 5];
- void *buffer = (void *) regs->regs [base + 6];
+ void __user *buffer = (void __user *) regs->regs [base + 6];
int count = (int) regs->regs [base + 7];
switch (arg1) {
@@ -692,8 +676,8 @@ asmlinkage int irix_pause(void)
}
/* XXX need more than this... */
-asmlinkage int irix_mount(char *dev_name, char *dir_name, unsigned long flags,
- char *type, void *data, int datalen)
+asmlinkage int irix_mount(char __user *dev_name, char __user *dir_name,
+ unsigned long flags, char __user *type, void __user *data, int datalen)
{
printk("[%s:%d] irix_mount(%p,%p,%08lx,%p,%p,%d)\n",
current->comm, current->pid,
@@ -708,8 +692,8 @@ struct irix_statfs {
char f_fname[6], f_fpack[6];
};
-asmlinkage int irix_statfs(const char *path, struct irix_statfs *buf,
- int len, int fs_type)
+asmlinkage int irix_statfs(const char __user *path,
+ struct irix_statfs __user *buf, int len, int fs_type)
{
struct nameidata nd;
struct kstatfs kbuf;
@@ -724,6 +708,7 @@ asmlinkage int irix_statfs(const char *path, struct irix_statfs *buf,
error = -EFAULT;
goto out;
}
+
error = user_path_walk(path, &nd);
if (error)
goto out;
@@ -732,18 +717,17 @@ asmlinkage int irix_statfs(const char *path, struct irix_statfs *buf,
if (error)
goto dput_and_out;
- __put_user(kbuf.f_type, &buf->f_type);
- __put_user(kbuf.f_bsize, &buf->f_bsize);
- __put_user(kbuf.f_frsize, &buf->f_frsize);
- __put_user(kbuf.f_blocks, &buf->f_blocks);
- __put_user(kbuf.f_bfree, &buf->f_bfree);
- __put_user(kbuf.f_files, &buf->f_files);
- __put_user(kbuf.f_ffree, &buf->f_ffree);
+ error = __put_user(kbuf.f_type, &buf->f_type);
+ error |= __put_user(kbuf.f_bsize, &buf->f_bsize);
+ error |= __put_user(kbuf.f_frsize, &buf->f_frsize);
+ error |= __put_user(kbuf.f_blocks, &buf->f_blocks);
+ error |= __put_user(kbuf.f_bfree, &buf->f_bfree);
+ error |= __put_user(kbuf.f_files, &buf->f_files);
+ error |= __put_user(kbuf.f_ffree, &buf->f_ffree);
for (i = 0; i < 6; i++) {
- __put_user(0, &buf->f_fname[i]);
- __put_user(0, &buf->f_fpack[i]);
+ error |= __put_user(0, &buf->f_fname[i]);
+ error |= __put_user(0, &buf->f_fpack[i]);
}
- error = 0;
dput_and_out:
path_release(&nd);
@@ -751,7 +735,7 @@ out:
return error;
}
-asmlinkage int irix_fstatfs(unsigned int fd, struct irix_statfs *buf)
+asmlinkage int irix_fstatfs(unsigned int fd, struct irix_statfs __user *buf)
{
struct kstatfs kbuf;
struct file *file;
@@ -761,6 +745,7 @@ asmlinkage int irix_fstatfs(unsigned int fd, struct irix_statfs *buf)
error = -EFAULT;
goto out;
}
+
if (!(file = fget(fd))) {
error = -EBADF;
goto out;
@@ -770,16 +755,17 @@ asmlinkage int irix_fstatfs(unsigned int fd, struct irix_statfs *buf)
if (error)
goto out_f;
- __put_user(kbuf.f_type, &buf->f_type);
- __put_user(kbuf.f_bsize, &buf->f_bsize);
- __put_user(kbuf.f_frsize, &buf->f_frsize);
- __put_user(kbuf.f_blocks, &buf->f_blocks);
- __put_user(kbuf.f_bfree, &buf->f_bfree);
- __put_user(kbuf.f_files, &buf->f_files);
- __put_user(kbuf.f_ffree, &buf->f_ffree);
- for(i = 0; i < 6; i++) {
- __put_user(0, &buf->f_fname[i]);
- __put_user(0, &buf->f_fpack[i]);
+ error = __put_user(kbuf.f_type, &buf->f_type);
+ error |= __put_user(kbuf.f_bsize, &buf->f_bsize);
+ error |= __put_user(kbuf.f_frsize, &buf->f_frsize);
+ error |= __put_user(kbuf.f_blocks, &buf->f_blocks);
+ error |= __put_user(kbuf.f_bfree, &buf->f_bfree);
+ error |= __put_user(kbuf.f_files, &buf->f_files);
+ error |= __put_user(kbuf.f_ffree, &buf->f_ffree);
+
+ for (i = 0; i < 6; i++) {
+ error |= __put_user(0, &buf->f_fname[i]);
+ error |= __put_user(0, &buf->f_fpack[i]);
}
out_f:
@@ -806,14 +792,15 @@ asmlinkage int irix_setpgrp(int flags)
return error;
}
-asmlinkage int irix_times(struct tms * tbuf)
+asmlinkage int irix_times(struct tms __user *tbuf)
{
int err = 0;
if (tbuf) {
if (!access_ok(VERIFY_WRITE,tbuf,sizeof *tbuf))
return -EFAULT;
- err |= __put_user(current->utime, &tbuf->tms_utime);
+
+ err = __put_user(current->utime, &tbuf->tms_utime);
err |= __put_user(current->stime, &tbuf->tms_stime);
err |= __put_user(current->signal->cutime, &tbuf->tms_cutime);
err |= __put_user(current->signal->cstime, &tbuf->tms_cstime);
@@ -829,13 +816,13 @@ asmlinkage int irix_exec(struct pt_regs *regs)
if(regs->regs[2] == 1000)
base = 1;
- filename = getname((char *) (long)regs->regs[base + 4]);
+ filename = getname((char __user *) (long)regs->regs[base + 4]);
error = PTR_ERR(filename);
if (IS_ERR(filename))
return error;
- error = do_execve(filename, (char **) (long)regs->regs[base + 5],
- (char **) 0, regs);
+ error = do_execve(filename, (char __user * __user *) (long)regs->regs[base + 5],
+ NULL, regs);
putname(filename);
return error;
@@ -848,12 +835,12 @@ asmlinkage int irix_exece(struct pt_regs *regs)
if (regs->regs[2] == 1000)
base = 1;
- filename = getname((char *) (long)regs->regs[base + 4]);
+ filename = getname((char __user *) (long)regs->regs[base + 4]);
error = PTR_ERR(filename);
if (IS_ERR(filename))
return error;
- error = do_execve(filename, (char **) (long)regs->regs[base + 5],
- (char **) (long)regs->regs[base + 6], regs);
+ error = do_execve(filename, (char __user * __user *) (long)regs->regs[base + 5],
+ (char __user * __user *) (long)regs->regs[base + 6], regs);
putname(filename);
return error;
@@ -909,22 +896,17 @@ asmlinkage int irix_socket(int family, int type, int protocol)
return sys_socket(family, type, protocol);
}
-asmlinkage int irix_getdomainname(char *name, int len)
+asmlinkage int irix_getdomainname(char __user *name, int len)
{
- int error;
-
- if (!access_ok(VERIFY_WRITE, name, len))
- return -EFAULT;
+ int err;
down_read(&uts_sem);
if (len > __NEW_UTS_LEN)
len = __NEW_UTS_LEN;
- error = 0;
- if (copy_to_user(name, system_utsname.domainname, len))
- error = -EFAULT;
+ err = copy_to_user(name, system_utsname.domainname, len) ? -EFAULT : 0;
up_read(&uts_sem);
- return error;
+ return err;
}
asmlinkage unsigned long irix_getpagesize(void)
@@ -940,12 +922,13 @@ asmlinkage int irix_msgsys(int opcode, unsigned long arg0, unsigned long arg1,
case 0:
return sys_msgget((key_t) arg0, (int) arg1);
case 1:
- return sys_msgctl((int) arg0, (int) arg1, (struct msqid_ds *)arg2);
+ return sys_msgctl((int) arg0, (int) arg1,
+ (struct msqid_ds __user *)arg2);
case 2:
- return sys_msgrcv((int) arg0, (struct msgbuf *) arg1,
+ return sys_msgrcv((int) arg0, (struct msgbuf __user *) arg1,
(size_t) arg2, (long) arg3, (int) arg4);
case 3:
- return sys_msgsnd((int) arg0, (struct msgbuf *) arg1,
+ return sys_msgsnd((int) arg0, (struct msgbuf __user *) arg1,
(size_t) arg2, (int) arg3);
default:
return -EINVAL;
@@ -957,12 +940,13 @@ asmlinkage int irix_shmsys(int opcode, unsigned long arg0, unsigned long arg1,
{
switch (opcode) {
case 0:
- return do_shmat((int) arg0, (char *)arg1, (int) arg2,
+ return do_shmat((int) arg0, (char __user *) arg1, (int) arg2,
(unsigned long *) arg3);
case 1:
- return sys_shmctl((int)arg0, (int)arg1, (struct shmid_ds *)arg2);
+ return sys_shmctl((int)arg0, (int)arg1,
+ (struct shmid_ds __user *)arg2);
case 2:
- return sys_shmdt((char *)arg0);
+ return sys_shmdt((char __user *)arg0);
case 3:
return sys_shmget((key_t) arg0, (int) arg1, (int) arg2);
default:
@@ -980,7 +964,7 @@ asmlinkage int irix_semsys(int opcode, unsigned long arg0, unsigned long arg1,
case 1:
return sys_semget((key_t) arg0, (int) arg1, (int) arg2);
case 2:
- return sys_semop((int) arg0, (struct sembuf *)arg1,
+ return sys_semop((int) arg0, (struct sembuf __user *)arg1,
(unsigned int) arg2);
default:
return -EINVAL;
@@ -998,15 +982,16 @@ static inline loff_t llseek(struct file *file, loff_t offset, int origin)
lock_kernel();
retval = fn(file, offset, origin);
unlock_kernel();
+
return retval;
}
asmlinkage int irix_lseek64(int fd, int _unused, int offhi, int offlow,
int origin)
{
- int retval;
struct file * file;
loff_t offset;
+ int retval;
retval = -EBADF;
file = fget(fd);
@@ -1031,12 +1016,12 @@ asmlinkage int irix_sginap(int ticks)
return 0;
}
-asmlinkage int irix_sgikopt(char *istring, char *ostring, int len)
+asmlinkage int irix_sgikopt(char __user *istring, char __user *ostring, int len)
{
return -EINVAL;
}
-asmlinkage int irix_gettimeofday(struct timeval *tv)
+asmlinkage int irix_gettimeofday(struct timeval __user *tv)
{
time_t sec;
long nsec, seq;
@@ -1077,7 +1062,7 @@ asmlinkage unsigned long irix_mmap32(unsigned long addr, size_t len, int prot,
if (max_size > file->f_dentry->d_inode->i_size) {
old_pos = sys_lseek (fd, max_size - 1, 0);
- sys_write (fd, "", 1);
+ sys_write (fd, (void __user *) "", 1);
sys_lseek (fd, old_pos, 0);
}
}
@@ -1102,7 +1087,7 @@ asmlinkage int irix_madvise(unsigned long addr, int len, int behavior)
return -EINVAL;
}
-asmlinkage int irix_pagelock(char *addr, int len, int op)
+asmlinkage int irix_pagelock(char __user *addr, int len, int op)
{
printk("[%s:%d] Wheee.. irix_pagelock(%p,%d,%d)\n",
current->comm, current->pid, addr, len, op);
@@ -1142,7 +1127,7 @@ asmlinkage int irix_BSDsetpgrp(int pid, int pgrp)
return error;
}
-asmlinkage int irix_systeminfo(int cmd, char *buf, int cnt)
+asmlinkage int irix_systeminfo(int cmd, char __user *buf, int cnt)
{
printk("[%s:%d] Wheee.. irix_systeminfo(%d,%p,%d)\n",
current->comm, current->pid, cmd, buf, cnt);
@@ -1158,14 +1143,14 @@ struct iuname {
char _unused3[257], _unused4[257], _unused5[257];
};
-asmlinkage int irix_uname(struct iuname *buf)
+asmlinkage int irix_uname(struct iuname __user *buf)
{
down_read(&uts_sem);
- if (copy_to_user(system_utsname.sysname, buf->sysname, 65)
- || copy_to_user(system_utsname.nodename, buf->nodename, 65)
- || copy_to_user(system_utsname.release, buf->release, 65)
- || copy_to_user(system_utsname.version, buf->version, 65)
- || copy_to_user(system_utsname.machine, buf->machine, 65)) {
+ if (copy_from_user(system_utsname.sysname, buf->sysname, 65)
+ || copy_from_user(system_utsname.nodename, buf->nodename, 65)
+ || copy_from_user(system_utsname.release, buf->release, 65)
+ || copy_from_user(system_utsname.version, buf->version, 65)
+ || copy_from_user(system_utsname.machine, buf->machine, 65)) {
return -EFAULT;
}
up_read(&uts_sem);
@@ -1175,7 +1160,7 @@ asmlinkage int irix_uname(struct iuname *buf)
#undef DEBUG_XSTAT
-static int irix_xstat32_xlate(struct kstat *stat, void *ubuf)
+static int irix_xstat32_xlate(struct kstat *stat, void __user *ubuf)
{
struct xstat32 {
u32 st_dev, st_pad1[3], st_ino, st_mode, st_nlink, st_uid, st_gid;
@@ -1215,7 +1200,7 @@ static int irix_xstat32_xlate(struct kstat *stat, void *ubuf)
return copy_to_user(ubuf, &ub, sizeof(ub)) ? -EFAULT : 0;
}
-static int irix_xstat64_xlate(struct kstat *stat, void *ubuf)
+static int irix_xstat64_xlate(struct kstat *stat, void __user *ubuf)
{
struct xstat64 {
u32 st_dev; s32 st_pad1[3];
@@ -1265,7 +1250,7 @@ static int irix_xstat64_xlate(struct kstat *stat, void *ubuf)
return copy_to_user(ubuf, &ks, sizeof(ks)) ? -EFAULT : 0;
}
-asmlinkage int irix_xstat(int version, char *filename, struct stat *statbuf)
+asmlinkage int irix_xstat(int version, char __user *filename, struct stat __user *statbuf)
{
int retval;
struct kstat stat;
@@ -1291,7 +1276,7 @@ asmlinkage int irix_xstat(int version, char *filename, struct stat *statbuf)
return retval;
}
-asmlinkage int irix_lxstat(int version, char *filename, struct stat *statbuf)
+asmlinkage int irix_lxstat(int version, char __user *filename, struct stat __user *statbuf)
{
int error;
struct kstat stat;
@@ -1318,7 +1303,7 @@ asmlinkage int irix_lxstat(int version, char *filename, struct stat *statbuf)
return error;
}
-asmlinkage int irix_fxstat(int version, int fd, struct stat *statbuf)
+asmlinkage int irix_fxstat(int version, int fd, struct stat __user *statbuf)
{
int error;
struct kstat stat;
@@ -1344,7 +1329,7 @@ asmlinkage int irix_fxstat(int version, int fd, struct stat *statbuf)
return error;
}
-asmlinkage int irix_xmknod(int ver, char *filename, int mode, unsigned dev)
+asmlinkage int irix_xmknod(int ver, char __user *filename, int mode, unsigned dev)
{
int retval;
printk("[%s:%d] Wheee.. irix_xmknod(%d,%s,%x,%x)\n",
@@ -1364,7 +1349,7 @@ asmlinkage int irix_xmknod(int ver, char *filename, int mode, unsigned dev)
return retval;
}
-asmlinkage int irix_swapctl(int cmd, char *arg)
+asmlinkage int irix_swapctl(int cmd, char __user *arg)
{
printk("[%s:%d] Wheee.. irix_swapctl(%d,%p)\n",
current->comm, current->pid, cmd, arg);
@@ -1380,7 +1365,7 @@ struct irix_statvfs {
char f_fstr[32]; u32 f_filler[16];
};
-asmlinkage int irix_statvfs(char *fname, struct irix_statvfs *buf)
+asmlinkage int irix_statvfs(char __user *fname, struct irix_statvfs __user *buf)
{
struct nameidata nd;
struct kstatfs kbuf;
@@ -1388,10 +1373,9 @@ asmlinkage int irix_statvfs(char *fname, struct irix_statvfs *buf)
printk("[%s:%d] Wheee.. irix_statvfs(%s,%p)\n",
current->comm, current->pid, fname, buf);
- if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statvfs))) {
- error = -EFAULT;
- goto out;
- }
+ if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statvfs)))
+ return -EFAULT;
+
error = user_path_walk(fname, &nd);
if (error)
goto out;
@@ -1399,27 +1383,25 @@ asmlinkage int irix_statvfs(char *fname, struct irix_statvfs *buf)
if (error)
goto dput_and_out;
- __put_user(kbuf.f_bsize, &buf->f_bsize);
- __put_user(kbuf.f_frsize, &buf->f_frsize);
- __put_user(kbuf.f_blocks, &buf->f_blocks);
- __put_user(kbuf.f_bfree, &buf->f_bfree);
- __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */
- __put_user(kbuf.f_files, &buf->f_files);
- __put_user(kbuf.f_ffree, &buf->f_ffree);
- __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */
+ error |= __put_user(kbuf.f_bsize, &buf->f_bsize);
+ error |= __put_user(kbuf.f_frsize, &buf->f_frsize);
+ error |= __put_user(kbuf.f_blocks, &buf->f_blocks);
+ error |= __put_user(kbuf.f_bfree, &buf->f_bfree);
+ error |= __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */
+ error |= __put_user(kbuf.f_files, &buf->f_files);
+ error |= __put_user(kbuf.f_ffree, &buf->f_ffree);
+ error |= __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */
#ifdef __MIPSEB__
- __put_user(kbuf.f_fsid.val[1], &buf->f_fsid);
+ error |= __put_user(kbuf.f_fsid.val[1], &buf->f_fsid);
#else
- __put_user(kbuf.f_fsid.val[0], &buf->f_fsid);
+ error |= __put_user(kbuf.f_fsid.val[0], &buf->f_fsid);
#endif
for (i = 0; i < 16; i++)
- __put_user(0, &buf->f_basetype[i]);
- __put_user(0, &buf->f_flag);
- __put_user(kbuf.f_namelen, &buf->f_namemax);
+ error |= __put_user(0, &buf->f_basetype[i]);
+ error |= __put_user(0, &buf->f_flag);
+ error |= __put_user(kbuf.f_namelen, &buf->f_namemax);
for (i = 0; i < 32; i++)
- __put_user(0, &buf->f_fstr[i]);
-
- error = 0;
+ error |= __put_user(0, &buf->f_fstr[i]);
dput_and_out:
path_release(&nd);
@@ -1427,7 +1409,7 @@ out:
return error;
}
-asmlinkage int irix_fstatvfs(int fd, struct irix_statvfs *buf)
+asmlinkage int irix_fstatvfs(int fd, struct irix_statvfs __user *buf)
{
struct kstatfs kbuf;
struct file *file;
@@ -1436,10 +1418,9 @@ asmlinkage int irix_fstatvfs(int fd, struct irix_statvfs *buf)
printk("[%s:%d] Wheee.. irix_fstatvfs(%d,%p)\n",
current->comm, current->pid, fd, buf);
- if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statvfs))) {
- error = -EFAULT;
- goto out;
- }
+ if (!access_ok(VERIFY_WRITE, buf, sizeof(struct irix_statvfs)))
+ return -EFAULT;
+
if (!(file = fget(fd))) {
error = -EBADF;
goto out;
@@ -1448,24 +1429,24 @@ asmlinkage int irix_fstatvfs(int fd, struct irix_statvfs *buf)
if (error)
goto out_f;
- __put_user(kbuf.f_bsize, &buf->f_bsize);
- __put_user(kbuf.f_frsize, &buf->f_frsize);
- __put_user(kbuf.f_blocks, &buf->f_blocks);
- __put_user(kbuf.f_bfree, &buf->f_bfree);
- __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */
- __put_user(kbuf.f_files, &buf->f_files);
- __put_user(kbuf.f_ffree, &buf->f_ffree);
- __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */
+ error = __put_user(kbuf.f_bsize, &buf->f_bsize);
+ error |= __put_user(kbuf.f_frsize, &buf->f_frsize);
+ error |= __put_user(kbuf.f_blocks, &buf->f_blocks);
+ error |= __put_user(kbuf.f_bfree, &buf->f_bfree);
+ error |= __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */
+ error |= __put_user(kbuf.f_files, &buf->f_files);
+ error |= __put_user(kbuf.f_ffree, &buf->f_ffree);
+ error |= __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */
#ifdef __MIPSEB__
- __put_user(kbuf.f_fsid.val[1], &buf->f_fsid);
+ error |= __put_user(kbuf.f_fsid.val[1], &buf->f_fsid);
#else
- __put_user(kbuf.f_fsid.val[0], &buf->f_fsid);
+ error |= __put_user(kbuf.f_fsid.val[0], &buf->f_fsid);
#endif
for(i = 0; i < 16; i++)
- __put_user(0, &buf->f_basetype[i]);
- __put_user(0, &buf->f_flag);
- __put_user(kbuf.f_namelen, &buf->f_namemax);
- __clear_user(&buf->f_fstr, sizeof(buf->f_fstr));
+ error |= __put_user(0, &buf->f_basetype[i]);
+ error |= __put_user(0, &buf->f_flag);
+ error |= __put_user(kbuf.f_namelen, &buf->f_namemax);
+ error |= __clear_user(&buf->f_fstr, sizeof(buf->f_fstr)) ? -EFAULT : 0;
out_f:
fput(file);
@@ -1489,7 +1470,7 @@ asmlinkage int irix_sigqueue(int pid, int sig, int code, int val)
return -EINVAL;
}
-asmlinkage int irix_truncate64(char *name, int pad, int size1, int size2)
+asmlinkage int irix_truncate64(char __user *name, int pad, int size1, int size2)
{
int retval;
@@ -1522,6 +1503,7 @@ asmlinkage int irix_mmap64(struct pt_regs *regs)
int len, prot, flags, fd, off1, off2, error, base = 0;
unsigned long addr, pgoff, *sp;
struct file *file = NULL;
+ int err;
if (regs->regs[2] == 1000)
base = 1;
@@ -1531,36 +1513,31 @@ asmlinkage int irix_mmap64(struct pt_regs *regs)
prot = regs->regs[base + 6];
if (!base) {
flags = regs->regs[base + 7];
- if (!access_ok(VERIFY_READ, sp, (4 * sizeof(unsigned long)))) {
- error = -EFAULT;
- goto out;
- }
+ if (!access_ok(VERIFY_READ, sp, (4 * sizeof(unsigned long))))
+ return -EFAULT;
fd = sp[0];
- __get_user(off1, &sp[1]);
- __get_user(off2, &sp[2]);
+ err = __get_user(off1, &sp[1]);
+ err |= __get_user(off2, &sp[2]);
} else {
- if (!access_ok(VERIFY_READ, sp, (5 * sizeof(unsigned long)))) {
- error = -EFAULT;
- goto out;
- }
- __get_user(flags, &sp[0]);
- __get_user(fd, &sp[1]);
- __get_user(off1, &sp[2]);
- __get_user(off2, &sp[3]);
+ if (!access_ok(VERIFY_READ, sp, (5 * sizeof(unsigned long))))
+ return -EFAULT;
+ err = __get_user(flags, &sp[0]);
+ err |= __get_user(fd, &sp[1]);
+ err |= __get_user(off1, &sp[2]);
+ err |= __get_user(off2, &sp[3]);
}
- if (off1 & PAGE_MASK) {
- error = -EOVERFLOW;
- goto out;
- }
+ if (err)
+ return err;
+
+ if (off1 & PAGE_MASK)
+ return -EOVERFLOW;
pgoff = (off1 << (32 - PAGE_SHIFT)) | (off2 >> PAGE_SHIFT);
if (!(flags & MAP_ANONYMOUS)) {
- if (!(file = fget(fd))) {
- error = -EBADF;
- goto out;
- }
+ if (!(file = fget(fd)))
+ return -EBADF;
/* Ok, bad taste hack follows, try to think in something else
when reading this */
@@ -1570,7 +1547,7 @@ asmlinkage int irix_mmap64(struct pt_regs *regs)
if (max_size > file->f_dentry->d_inode->i_size) {
old_pos = sys_lseek (fd, max_size - 1, 0);
- sys_write (fd, "", 1);
+ sys_write (fd, (void __user *) "", 1);
sys_lseek (fd, old_pos, 0);
}
}
@@ -1585,7 +1562,6 @@ asmlinkage int irix_mmap64(struct pt_regs *regs)
if (file)
fput(file);
-out:
return error;
}
@@ -1597,7 +1573,7 @@ asmlinkage int irix_dmi(struct pt_regs *regs)
return -EINVAL;
}
-asmlinkage int irix_pread(int fd, char *buf, int cnt, int off64,
+asmlinkage int irix_pread(int fd, char __user *buf, int cnt, int off64,
int off1, int off2)
{
printk("[%s:%d] Wheee.. irix_pread(%d,%p,%d,%d,%d,%d)\n",
@@ -1606,7 +1582,7 @@ asmlinkage int irix_pread(int fd, char *buf, int cnt, int off64,
return -EINVAL;
}
-asmlinkage int irix_pwrite(int fd, char *buf, int cnt, int off64,
+asmlinkage int irix_pwrite(int fd, char __user *buf, int cnt, int off64,
int off1, int off2)
{
printk("[%s:%d] Wheee.. irix_pwrite(%d,%p,%d,%d,%d,%d)\n",
@@ -1638,7 +1614,7 @@ struct irix_statvfs64 {
u32 f_filler[16];
};
-asmlinkage int irix_statvfs64(char *fname, struct irix_statvfs64 *buf)
+asmlinkage int irix_statvfs64(char __user *fname, struct irix_statvfs64 __user *buf)
{
struct nameidata nd;
struct kstatfs kbuf;
@@ -1650,6 +1626,7 @@ asmlinkage int irix_statvfs64(char *fname, struct irix_statvfs64 *buf)
error = -EFAULT;
goto out;
}
+
error = user_path_walk(fname, &nd);
if (error)
goto out;
@@ -1657,27 +1634,25 @@ asmlinkage int irix_statvfs64(char *fname, struct irix_statvfs64 *buf)
if (error)
goto dput_and_out;
- __put_user(kbuf.f_bsize, &buf->f_bsize);
- __put_user(kbuf.f_frsize, &buf->f_frsize);
- __put_user(kbuf.f_blocks, &buf->f_blocks);
- __put_user(kbuf.f_bfree, &buf->f_bfree);
- __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */
- __put_user(kbuf.f_files, &buf->f_files);
- __put_user(kbuf.f_ffree, &buf->f_ffree);
- __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */
+ error = __put_user(kbuf.f_bsize, &buf->f_bsize);
+ error |= __put_user(kbuf.f_frsize, &buf->f_frsize);
+ error |= __put_user(kbuf.f_blocks, &buf->f_blocks);
+ error |= __put_user(kbuf.f_bfree, &buf->f_bfree);
+ error |= __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */
+ error |= __put_user(kbuf.f_files, &buf->f_files);
+ error |= __put_user(kbuf.f_ffree, &buf->f_ffree);
+ error |= __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */
#ifdef __MIPSEB__
- __put_user(kbuf.f_fsid.val[1], &buf->f_fsid);
+ error |= __put_user(kbuf.f_fsid.val[1], &buf->f_fsid);
#else
- __put_user(kbuf.f_fsid.val[0], &buf->f_fsid);
+ error |= __put_user(kbuf.f_fsid.val[0], &buf->f_fsid);
#endif
for(i = 0; i < 16; i++)
- __put_user(0, &buf->f_basetype[i]);
- __put_user(0, &buf->f_flag);
- __put_user(kbuf.f_namelen, &buf->f_namemax);
+ error |= __put_user(0, &buf->f_basetype[i]);
+ error |= __put_user(0, &buf->f_flag);
+ error |= __put_user(kbuf.f_namelen, &buf->f_namemax);
for(i = 0; i < 32; i++)
- __put_user(0, &buf->f_fstr[i]);
-
- error = 0;
+ error |= __put_user(0, &buf->f_fstr[i]);
dput_and_out:
path_release(&nd);
@@ -1685,7 +1660,7 @@ out:
return error;
}
-asmlinkage int irix_fstatvfs64(int fd, struct irix_statvfs *buf)
+asmlinkage int irix_fstatvfs64(int fd, struct irix_statvfs __user *buf)
{
struct kstatfs kbuf;
struct file *file;
@@ -1706,24 +1681,24 @@ asmlinkage int irix_fstatvfs64(int fd, struct irix_statvfs *buf)
if (error)
goto out_f;
- __put_user(kbuf.f_bsize, &buf->f_bsize);
- __put_user(kbuf.f_frsize, &buf->f_frsize);
- __put_user(kbuf.f_blocks, &buf->f_blocks);
- __put_user(kbuf.f_bfree, &buf->f_bfree);
- __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */
- __put_user(kbuf.f_files, &buf->f_files);
- __put_user(kbuf.f_ffree, &buf->f_ffree);
- __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */
+ error = __put_user(kbuf.f_bsize, &buf->f_bsize);
+ error |= __put_user(kbuf.f_frsize, &buf->f_frsize);
+ error |= __put_user(kbuf.f_blocks, &buf->f_blocks);
+ error |= __put_user(kbuf.f_bfree, &buf->f_bfree);
+ error |= __put_user(kbuf.f_bfree, &buf->f_bavail); /* XXX hackety hack... */
+ error |= __put_user(kbuf.f_files, &buf->f_files);
+ error |= __put_user(kbuf.f_ffree, &buf->f_ffree);
+ error |= __put_user(kbuf.f_ffree, &buf->f_favail); /* XXX hackety hack... */
#ifdef __MIPSEB__
- __put_user(kbuf.f_fsid.val[1], &buf->f_fsid);
+ error |= __put_user(kbuf.f_fsid.val[1], &buf->f_fsid);
#else
- __put_user(kbuf.f_fsid.val[0], &buf->f_fsid);
+ error |= __put_user(kbuf.f_fsid.val[0], &buf->f_fsid);
#endif
for(i = 0; i < 16; i++)
- __put_user(0, &buf->f_basetype[i]);
- __put_user(0, &buf->f_flag);
- __put_user(kbuf.f_namelen, &buf->f_namemax);
- __clear_user(buf->f_fstr, sizeof(buf->f_fstr[i]));
+ error |= __put_user(0, &buf->f_basetype[i]);
+ error |= __put_user(0, &buf->f_flag);
+ error |= __put_user(kbuf.f_namelen, &buf->f_namemax);
+ error |= __clear_user(buf->f_fstr, sizeof(buf->f_fstr[i])) ? -EFAULT : 0;
out_f:
fput(file);
@@ -1731,9 +1706,9 @@ out:
return error;
}
-asmlinkage int irix_getmountid(char *fname, unsigned long *midbuf)
+asmlinkage int irix_getmountid(char __user *fname, unsigned long __user *midbuf)
{
- int err = 0;
+ int err;
printk("[%s:%d] irix_getmountid(%s, %p)\n",
current->comm, current->pid, fname, midbuf);
@@ -1746,7 +1721,7 @@ asmlinkage int irix_getmountid(char *fname, unsigned long *midbuf)
* fsid of the filesystem to try and make the right decision, but
* we don't have this so for now. XXX
*/
- err |= __put_user(0, &midbuf[0]);
+ err = __put_user(0, &midbuf[0]);
err |= __put_user(0, &midbuf[1]);
err |= __put_user(0, &midbuf[2]);
err |= __put_user(0, &midbuf[3]);
@@ -1773,8 +1748,8 @@ struct irix_dirent32 {
};
struct irix_dirent32_callback {
- struct irix_dirent32 *current_dir;
- struct irix_dirent32 *previous;
+ struct irix_dirent32 __user *current_dir;
+ struct irix_dirent32 __user *previous;
int count;
int error;
};
@@ -1782,13 +1757,13 @@ struct irix_dirent32_callback {
#define NAME_OFFSET32(de) ((int) ((de)->d_name - (char *) (de)))
#define ROUND_UP32(x) (((x)+sizeof(u32)-1) & ~(sizeof(u32)-1))
-static int irix_filldir32(void *__buf, const char *name, int namlen,
- loff_t offset, ino_t ino, unsigned int d_type)
+static int irix_filldir32(void *__buf, const char *name,
+ int namlen, loff_t offset, ino_t ino, unsigned int d_type)
{
- struct irix_dirent32 *dirent;
- struct irix_dirent32_callback *buf =
- (struct irix_dirent32_callback *)__buf;
+ struct irix_dirent32 __user *dirent;
+ struct irix_dirent32_callback *buf = __buf;
unsigned short reclen = ROUND_UP32(NAME_OFFSET32(dirent) + namlen + 1);
+ int err = 0;
#ifdef DEBUG_GETDENTS
printk("\nirix_filldir32[reclen<%d>namlen<%d>count<%d>]",
@@ -1799,25 +1774,26 @@ static int irix_filldir32(void *__buf, const char *name, int namlen,
return -EINVAL;
dirent = buf->previous;
if (dirent)
- __put_user(offset, &dirent->d_off);
+ err = __put_user(offset, &dirent->d_off);
dirent = buf->current_dir;
- buf->previous = dirent;
- __put_user(ino, &dirent->d_ino);
- __put_user(reclen, &dirent->d_reclen);
- copy_to_user(dirent->d_name, name, namlen);
- __put_user(0, &dirent->d_name[namlen]);
- ((char *) dirent) += reclen;
+ err |= __put_user(dirent, &buf->previous);
+ err |= __put_user(ino, &dirent->d_ino);
+ err |= __put_user(reclen, &dirent->d_reclen);
+ err |= copy_to_user((char __user *)dirent->d_name, name, namlen) ? -EFAULT : 0;
+ err |= __put_user(0, &dirent->d_name[namlen]);
+ dirent = (struct irix_dirent32 __user *) ((char __user *) dirent + reclen);
+
buf->current_dir = dirent;
buf->count -= reclen;
- return 0;
+ return err;
}
-asmlinkage int irix_ngetdents(unsigned int fd, void * dirent,
- unsigned int count, int *eob)
+asmlinkage int irix_ngetdents(unsigned int fd, void __user * dirent,
+ unsigned int count, int __user *eob)
{
struct file *file;
- struct irix_dirent32 *lastdirent;
+ struct irix_dirent32 __user *lastdirent;
struct irix_dirent32_callback buf;
int error;
@@ -1830,7 +1806,7 @@ asmlinkage int irix_ngetdents(unsigned int fd, void * dirent,
if (!file)
goto out;
- buf.current_dir = (struct irix_dirent32 *) dirent;
+ buf.current_dir = (struct irix_dirent32 __user *) dirent;
buf.previous = NULL;
buf.count = count;
buf.error = 0;
@@ -1870,8 +1846,8 @@ struct irix_dirent64 {
};
struct irix_dirent64_callback {
- struct irix_dirent64 *curr;
- struct irix_dirent64 *previous;
+ struct irix_dirent64 __user *curr;
+ struct irix_dirent64 __user *previous;
int count;
int error;
};
@@ -1879,37 +1855,44 @@ struct irix_dirent64_callback {
#define NAME_OFFSET64(de) ((int) ((de)->d_name - (char *) (de)))
#define ROUND_UP64(x) (((x)+sizeof(u64)-1) & ~(sizeof(u64)-1))
-static int irix_filldir64(void * __buf, const char * name, int namlen,
- loff_t offset, ino_t ino, unsigned int d_type)
+static int irix_filldir64(void *__buf, const char *name,
+ int namlen, loff_t offset, ino_t ino, unsigned int d_type)
{
- struct irix_dirent64 *dirent;
- struct irix_dirent64_callback * buf =
- (struct irix_dirent64_callback *) __buf;
+ struct irix_dirent64 __user *dirent;
+ struct irix_dirent64_callback * buf = __buf;
unsigned short reclen = ROUND_UP64(NAME_OFFSET64(dirent) + namlen + 1);
+ int err = 0;
- buf->error = -EINVAL; /* only used if we fail.. */
+ if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
+ return -EFAULT;
+
+ if (__put_user(-EINVAL, &buf->error)) /* only used if we fail.. */
+ return -EFAULT;
if (reclen > buf->count)
return -EINVAL;
dirent = buf->previous;
if (dirent)
- __put_user(offset, &dirent->d_off);
+ err = __put_user(offset, &dirent->d_off);
dirent = buf->curr;
buf->previous = dirent;
- __put_user(ino, &dirent->d_ino);
- __put_user(reclen, &dirent->d_reclen);
- __copy_to_user(dirent->d_name, name, namlen);
- __put_user(0, &dirent->d_name[namlen]);
- ((char *) dirent) += reclen;
+ err |= __put_user(ino, &dirent->d_ino);
+ err |= __put_user(reclen, &dirent->d_reclen);
+ err |= __copy_to_user((char __user *)dirent->d_name, name, namlen)
+ ? -EFAULT : 0;
+ err |= __put_user(0, &dirent->d_name[namlen]);
+
+ dirent = (struct irix_dirent64 __user *) ((char __user *) dirent + reclen);
+
buf->curr = dirent;
buf->count -= reclen;
- return 0;
+ return err;
}
-asmlinkage int irix_getdents64(int fd, void *dirent, int cnt)
+asmlinkage int irix_getdents64(int fd, void __user *dirent, int cnt)
{
struct file *file;
- struct irix_dirent64 *lastdirent;
+ struct irix_dirent64 __user *lastdirent;
struct irix_dirent64_callback buf;
int error;
@@ -1929,7 +1912,7 @@ asmlinkage int irix_getdents64(int fd, void *dirent, int cnt)
if (cnt < (sizeof(struct irix_dirent64) + 255))
goto out_f;
- buf.curr = (struct irix_dirent64 *) dirent;
+ buf.curr = (struct irix_dirent64 __user *) dirent;
buf.previous = NULL;
buf.count = cnt;
buf.error = 0;
@@ -1941,7 +1924,8 @@ asmlinkage int irix_getdents64(int fd, void *dirent, int cnt)
error = buf.error;
goto out_f;
}
- lastdirent->d_off = (u64) file->f_pos;
+ if (put_user(file->f_pos, &lastdirent->d_off))
+ return -EFAULT;
#ifdef DEBUG_GETDENTS
printk("returning %d\n", cnt - buf.count);
#endif
@@ -1953,10 +1937,10 @@ out:
return error;
}
-asmlinkage int irix_ngetdents64(int fd, void *dirent, int cnt, int *eob)
+asmlinkage int irix_ngetdents64(int fd, void __user *dirent, int cnt, int *eob)
{
struct file *file;
- struct irix_dirent64 *lastdirent;
+ struct irix_dirent64 __user *lastdirent;
struct irix_dirent64_callback buf;
int error;
@@ -1978,7 +1962,7 @@ asmlinkage int irix_ngetdents64(int fd, void *dirent, int cnt, int *eob)
goto out_f;
*eob = 0;
- buf.curr = (struct irix_dirent64 *) dirent;
+ buf.curr = (struct irix_dirent64 __user *) dirent;
buf.previous = NULL;
buf.count = cnt;
buf.error = 0;
@@ -1990,7 +1974,8 @@ asmlinkage int irix_ngetdents64(int fd, void *dirent, int cnt, int *eob)
error = buf.error;
goto out_f;
}
- lastdirent->d_off = (u64) file->f_pos;
+ if (put_user(file->f_pos, &lastdirent->d_off))
+ return -EFAULT;
#ifdef DEBUG_GETDENTS
printk("eob=%d returning %d\n", *eob, cnt - buf.count);
#endif
@@ -2053,14 +2038,14 @@ out:
return retval;
}
-asmlinkage int irix_utssys(char *inbuf, int arg, int type, char *outbuf)
+asmlinkage int irix_utssys(char __user *inbuf, int arg, int type, char __user *outbuf)
{
int retval;
switch(type) {
case 0:
/* uname() */
- retval = irix_uname((struct iuname *)inbuf);
+ retval = irix_uname((struct iuname __user *)inbuf);
goto out;
case 2:
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 0dd0df7a3b04..787ed541d442 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -11,6 +11,7 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
+#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -25,6 +26,7 @@
#include <linux/module.h>
#include <asm/bootinfo.h>
+#include <asm/cache.h>
#include <asm/compiler.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
@@ -43,10 +45,6 @@
#define TICK_SIZE (tick_nsec / 1000)
-u64 jiffies_64 = INITIAL_JIFFIES;
-
-EXPORT_SYMBOL(jiffies_64);
-
/*
* forward reference
*/
@@ -76,7 +74,7 @@ int (*rtc_set_mmss)(unsigned long);
static unsigned int sll32_usecs_per_cycle;
/* how many counter cycles in a jiffy */
-static unsigned long cycles_per_jiffy;
+static unsigned long cycles_per_jiffy __read_mostly;
/* Cycle counter value at the previous timer interrupt.. */
static unsigned int timerhi, timerlo;
@@ -98,7 +96,10 @@ static unsigned int null_hpt_read(void)
return 0;
}
-static void null_hpt_init(unsigned int count) { /* nothing */ }
+static void null_hpt_init(unsigned int count)
+{
+ /* nothing */
+}
/*
@@ -108,8 +109,10 @@ static void c0_timer_ack(void)
{
unsigned int count;
+#ifndef CONFIG_SOC_PNX8550 /* pnx8550 resets to zero */
/* Ack this timer interrupt and set the next one. */
expirelo += cycles_per_jiffy;
+#endif
write_c0_compare(expirelo);
/* Check to see if we have missed any timer interrupts. */
@@ -224,7 +227,6 @@ int do_settimeofday(struct timespec *tv)
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
ntp_clear();
-
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index a53b1ed7b386..6f3ff9690686 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -9,7 +9,7 @@
* Copyright (C) 1999 Silicon Graphics, Inc.
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000, 01 MIPS Technologies, Inc.
- * Copyright (C) 2002, 2003, 2004 Maciej W. Rozycki
+ * Copyright (C) 2002, 2003, 2004, 2005 Maciej W. Rozycki
*/
#include <linux/config.h>
#include <linux/init.h>
@@ -20,12 +20,16 @@
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
+#include <linux/bootmem.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
+#include <asm/dsp.h>
#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
@@ -54,14 +58,19 @@ extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
+extern asmlinkage void handle_mt(void);
+extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
-extern int fpu_emulator_cop1Handler(int xcptno, struct pt_regs *xcp,
+extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_soft_struct *ctx);
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
+void (*board_nmi_handler_setup)(void);
+void (*board_ejtag_handler_setup)(void);
+void (*board_bind_eic_interrupt)(int irq, int regset);
/*
* These constant is for searching for possible module text segments.
@@ -201,32 +210,47 @@ void show_regs(struct pt_regs *regs)
printk("Status: %08x ", (uint32_t) regs->cp0_status);
- if (regs->cp0_status & ST0_KX)
- printk("KX ");
- if (regs->cp0_status & ST0_SX)
- printk("SX ");
- if (regs->cp0_status & ST0_UX)
- printk("UX ");
- switch (regs->cp0_status & ST0_KSU) {
- case KSU_USER:
- printk("USER ");
- break;
- case KSU_SUPERVISOR:
- printk("SUPERVISOR ");
- break;
- case KSU_KERNEL:
- printk("KERNEL ");
- break;
- default:
- printk("BAD_MODE ");
- break;
+ if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
+ if (regs->cp0_status & ST0_KUO)
+ printk("KUo ");
+ if (regs->cp0_status & ST0_IEO)
+ printk("IEo ");
+ if (regs->cp0_status & ST0_KUP)
+ printk("KUp ");
+ if (regs->cp0_status & ST0_IEP)
+ printk("IEp ");
+ if (regs->cp0_status & ST0_KUC)
+ printk("KUc ");
+ if (regs->cp0_status & ST0_IEC)
+ printk("IEc ");
+ } else {
+ if (regs->cp0_status & ST0_KX)
+ printk("KX ");
+ if (regs->cp0_status & ST0_SX)
+ printk("SX ");
+ if (regs->cp0_status & ST0_UX)
+ printk("UX ");
+ switch (regs->cp0_status & ST0_KSU) {
+ case KSU_USER:
+ printk("USER ");
+ break;
+ case KSU_SUPERVISOR:
+ printk("SUPERVISOR ");
+ break;
+ case KSU_KERNEL:
+ printk("KERNEL ");
+ break;
+ default:
+ printk("BAD_MODE ");
+ break;
+ }
+ if (regs->cp0_status & ST0_ERL)
+ printk("ERL ");
+ if (regs->cp0_status & ST0_EXL)
+ printk("EXL ");
+ if (regs->cp0_status & ST0_IE)
+ printk("IE ");
}
- if (regs->cp0_status & ST0_ERL)
- printk("ERL ");
- if (regs->cp0_status & ST0_EXL)
- printk("EXL ");
- if (regs->cp0_status & ST0_IE)
- printk("IE ");
printk("\n");
printk("Cause : %08x\n", cause);
@@ -252,29 +276,18 @@ void show_registers(struct pt_regs *regs)
static DEFINE_SPINLOCK(die_lock);
-NORET_TYPE void __die(const char * str, struct pt_regs * regs,
- const char * file, const char * func, unsigned long line)
+NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
{
static int die_counter;
console_verbose();
spin_lock_irq(&die_lock);
- printk("%s", str);
- if (file && func)
- printk(" in %s:%s, line %ld", file, func, line);
- printk("[#%d]:\n", ++die_counter);
+ printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs);
spin_unlock_irq(&die_lock);
do_exit(SIGSEGV);
}
-void __die_if_kernel(const char * str, struct pt_regs * regs,
- const char * file, const char * func, unsigned long line)
-{
- if (!user_mode(regs))
- __die(str, regs, file, func, line);
-}
-
extern const struct exception_table_entry __start___dbe_table[];
extern const struct exception_table_entry __stop___dbe_table[];
@@ -339,9 +352,9 @@ asmlinkage void do_be(struct pt_regs *regs)
static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
{
- unsigned int *epc;
+ unsigned int __user *epc;
- epc = (unsigned int *) regs->cp0_epc +
+ epc = (unsigned int __user *) regs->cp0_epc +
((regs->cp0_cause & CAUSEF_BD) != 0);
if (!get_user(*opcode, epc))
return 0;
@@ -360,6 +373,10 @@ static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
#define OFFSET 0x0000ffff
#define LL 0xc0000000
#define SC 0xe0000000
+#define SPEC3 0x7c000000
+#define RD 0x0000f800
+#define FUNC 0x0000003f
+#define RDHWR 0x0000003b
/*
* The ll_bit is cleared by r*_switch.S
@@ -371,7 +388,7 @@ static struct task_struct *ll_task = NULL;
static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
- unsigned long value, *vaddr;
+ unsigned long value, __user *vaddr;
long offset;
int signal = 0;
@@ -385,7 +402,8 @@ static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
offset <<= 16;
offset >>= 16;
- vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset);
+ vaddr = (unsigned long __user *)
+ ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
if ((unsigned long)vaddr & 3) {
signal = SIGBUS;
@@ -407,9 +425,10 @@ static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
preempt_enable();
+ compute_return_epc(regs);
+
regs->regs[(opcode & RT) >> 16] = value;
- compute_return_epc(regs);
return;
sig:
@@ -418,7 +437,8 @@ sig:
static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
- unsigned long *vaddr, reg;
+ unsigned long __user *vaddr;
+ unsigned long reg;
long offset;
int signal = 0;
@@ -432,7 +452,8 @@ static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
offset <<= 16;
offset >>= 16;
- vaddr = (unsigned long *)((long)(regs->regs[(opcode & BASE) >> 21]) + offset);
+ vaddr = (unsigned long __user *)
+ ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
reg = (opcode & RT) >> 16;
if ((unsigned long)vaddr & 3) {
@@ -443,9 +464,9 @@ static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
preempt_disable();
if (ll_bit == 0 || ll_task != current) {
+ compute_return_epc(regs);
regs->regs[reg] = 0;
preempt_enable();
- compute_return_epc(regs);
return;
}
@@ -456,9 +477,9 @@ static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
goto sig;
}
+ compute_return_epc(regs);
regs->regs[reg] = 1;
- compute_return_epc(regs);
return;
sig:
@@ -491,6 +512,37 @@ static inline int simulate_llsc(struct pt_regs *regs)
return -EFAULT; /* Strange things going on ... */
}
+/*
+ * Simulate trapping 'rdhwr' instructions to provide user accessible
+ * registers not implemented in hardware. The only current use of this
+ * is the thread area pointer.
+ */
+static inline int simulate_rdhwr(struct pt_regs *regs)
+{
+ struct thread_info *ti = current->thread_info;
+ unsigned int opcode;
+
+ if (unlikely(get_insn_opcode(regs, &opcode)))
+ return -EFAULT;
+
+ if (unlikely(compute_return_epc(regs)))
+ return -EFAULT;
+
+ if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
+ int rd = (opcode & RD) >> 11;
+ int rt = (opcode & RT) >> 16;
+ switch (rd) {
+ case 29:
+ regs->regs[rt] = ti->tp_value;
+ break;
+ default:
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
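The handler above only recognises hardware register 29 (the thread area pointer), which do_ri()/do_cpu() below route through when a core has no RDHWR instruction. As a hedged illustration (not part of this patch, and the helper name is made up), the consumer side would look roughly like this:

static inline unsigned long read_tls_pointer(void)	/* hypothetical helper */
{
	unsigned long tp;

	__asm__ __volatile__(
	"	.set	push		\n"
	"	.set	mips32r2	\n"
	"	rdhwr	%0, $29		\n"	/* traps into simulate_rdhwr() on cores without RDHWR */
	"	.set	pop		\n"
	: "=r" (tp));

	return tp;
}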
asmlinkage void do_ov(struct pt_regs *regs)
{
siginfo_t info;
@@ -498,7 +550,7 @@ asmlinkage void do_ov(struct pt_regs *regs)
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
info.si_errno = 0;
- info.si_addr = (void *)regs->cp0_epc;
+ info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
}
@@ -512,6 +564,14 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
preempt_disable();
+#ifdef CONFIG_PREEMPT
+ if (!is_fpu_owner()) {
+ /* We might lose fpu before disabling preempt... */
+ own_fpu();
+ BUG_ON(!used_math());
+ restore_fp(current);
+ }
+#endif
/*
* Unimplemented operation exception. If we've got the full
* software emulator on-board, let's use it...
@@ -523,11 +583,18 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
* a bit extreme for what should be an infrequent event.
*/
save_fp(current);
+ /* Ensure 'resume' does not overwrite the saved FP context again. */
+ lose_fpu();
+
+ preempt_enable();
/* Run the emulator */
- sig = fpu_emulator_cop1Handler (0, regs,
+ sig = fpu_emulator_cop1Handler (regs,
&current->thread.fpu.soft);
+ preempt_disable();
+
+ own_fpu(); /* Using the FPU again. */
/*
* We can't allow the emulated instruction to leave any of
* the cause bit set in $fcr31.
@@ -584,7 +651,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
info.si_errno = 0;
- info.si_addr = (void *)regs->cp0_epc;
+ info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
break;
default:
@@ -621,7 +688,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
info.si_errno = 0;
- info.si_addr = (void *)regs->cp0_epc;
+ info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
break;
default:
@@ -637,6 +704,9 @@ asmlinkage void do_ri(struct pt_regs *regs)
if (!simulate_llsc(regs))
return;
+ if (!simulate_rdhwr(regs))
+ return;
+
force_sig(SIGILL, current);
}
@@ -650,11 +720,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)
switch (cpid) {
case 0:
- if (cpu_has_llsc)
- break;
+ if (!cpu_has_llsc)
+ if (!simulate_llsc(regs))
+ return;
- if (!simulate_llsc(regs))
+ if (!simulate_rdhwr(regs))
return;
+
break;
case 1:
@@ -668,15 +740,15 @@ asmlinkage void do_cpu(struct pt_regs *regs)
set_used_math();
}
+ preempt_enable();
+
if (!cpu_has_fpu) {
- int sig = fpu_emulator_cop1Handler(0, regs,
+ int sig = fpu_emulator_cop1Handler(regs,
&current->thread.fpu.soft);
if (sig)
force_sig(sig, current);
}
- preempt_enable();
-
return;
case 2:
@@ -716,6 +788,22 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
(regs->cp0_status & ST0_TS) ? "" : "not ");
}
+asmlinkage void do_mt(struct pt_regs *regs)
+{
+ die_if_kernel("MIPS MT Thread exception in kernel", regs);
+
+ force_sig(SIGILL, current);
+}
+
+
+asmlinkage void do_dsp(struct pt_regs *regs)
+{
+ if (cpu_has_dsp)
+ panic("Unexpected DSP exception\n");
+
+ force_sig(SIGILL, current);
+}
+
asmlinkage void do_reserved(struct pt_regs *regs)
{
/*
@@ -728,6 +816,12 @@ asmlinkage void do_reserved(struct pt_regs *regs)
(regs->cp0_cause & 0x7f) >> 2);
}
+asmlinkage void do_default_vi(struct pt_regs *regs)
+{
+ show_regs(regs);
+ panic("Caught unexpected vectored interrupt.");
+}
+
/*
* Some MIPS CPUs can enable/disable for cache parity detection, but do
* it different ways.
@@ -736,16 +830,12 @@ static inline void parity_protection_init(void)
{
switch (current_cpu_data.cputype) {
case CPU_24K:
- /* 24K cache parity not currently implemented in FPGA */
- printk(KERN_INFO "Disable cache parity protection for "
- "MIPS 24K CPU.\n");
- write_c0_ecc(read_c0_ecc() & ~0x80000000);
- break;
case CPU_5KC:
- /* Set the PE bit (bit 31) in the c0_ecc register. */
- printk(KERN_INFO "Enable cache parity protection for "
- "MIPS 5KC/24K CPUs.\n");
- write_c0_ecc(read_c0_ecc() | 0x80000000);
+ write_c0_ecc(0x80000000);
+ back_to_back_c0_hazard();
+ /* Set the PE bit (bit 31) in the c0_errctl register. */
+ printk(KERN_INFO "Cache parity protection %sabled\n",
+ (read_c0_ecc() & 0x80000000) ? "en" : "dis");
break;
case CPU_20KC:
case CPU_25KF:
@@ -783,7 +873,7 @@ asmlinkage void cache_parity_error(void)
reg_val & (1<<22) ? "E0 " : "");
printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
-#if defined(CONFIG_CPU_MIPS32) || defined (CONFIG_CPU_MIPS64)
+#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
if (reg_val & (1<<22))
printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
@@ -840,7 +930,11 @@ void nmi_exception_handler(struct pt_regs *regs)
while(1) ;
}
+#define VECTORSPACING 0x100 /* for EI/VI mode */
+
+unsigned long ebase;
unsigned long exception_handlers[32];
+unsigned long vi_handlers[64];
/*
* As a side effect of the way this is implemented we're limited
@@ -854,13 +948,156 @@ void *set_except_vector(int n, void *addr)
exception_handlers[n] = handler;
if (n == 0 && cpu_has_divec) {
- *(volatile u32 *)(CAC_BASE + 0x200) = 0x08000000 |
+ *(volatile u32 *)(ebase + 0x200) = 0x08000000 |
(0x03ffffff & (handler >> 2));
- flush_icache_range(CAC_BASE + 0x200, CAC_BASE + 0x204);
+ flush_icache_range(ebase + 0x200, ebase + 0x204);
}
return (void *)old_handler;
}
+#ifdef CONFIG_CPU_MIPSR2
+/*
+ * Shadow register allocation
+ * FIXME: SMP...
+ */
+
+/* MIPSR2 shadow register sets */
+struct shadow_registers {
+ spinlock_t sr_lock; /* protects sr_allocated */
+ int sr_supported; /* Number of shadow register sets supported */
+ int sr_allocated; /* Bitmap of allocated shadow registers */
+} shadow_registers;
+
+void mips_srs_init(void)
+{
+#ifdef CONFIG_CPU_MIPSR2_SRS
+ shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
+ printk ("%d MIPSR2 register sets available\n", shadow_registers.sr_supported);
+#else
+ shadow_registers.sr_supported = 1;
+#endif
+ shadow_registers.sr_allocated = 1; /* Set 0 used by kernel */
+ spin_lock_init(&shadow_registers.sr_lock);
+}
+
+int mips_srs_max(void)
+{
+ return shadow_registers.sr_supported;
+}
+
+int mips_srs_alloc (void)
+{
+ struct shadow_registers *sr = &shadow_registers;
+ unsigned long flags;
+ int set;
+
+ spin_lock_irqsave(&sr->sr_lock, flags);
+
+ for (set = 0; set < sr->sr_supported; set++) {
+ if ((sr->sr_allocated & (1 << set)) == 0) {
+ sr->sr_allocated |= 1 << set;
+ spin_unlock_irqrestore(&sr->sr_lock, flags);
+ return set;
+ }
+ }
+
+ /* None available */
+ spin_unlock_irqrestore(&sr->sr_lock, flags);
+ return -1;
+}
+
+void mips_srs_free (int set)
+{
+ struct shadow_registers *sr = &shadow_registers;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sr->sr_lock, flags);
+ sr->sr_allocated &= ~(1 << set);
+ spin_unlock_irqrestore(&sr->sr_lock, flags);
+}
+
+void *set_vi_srs_handler (int n, void *addr, int srs)
+{
+ unsigned long handler;
+ unsigned long old_handler = vi_handlers[n];
+ u32 *w;
+ unsigned char *b;
+
+ if (!cpu_has_veic && !cpu_has_vint)
+ BUG();
+
+ if (addr == NULL) {
+ handler = (unsigned long) do_default_vi;
+ srs = 0;
+ }
+ else
+ handler = (unsigned long) addr;
+ vi_handlers[n] = (unsigned long) addr;
+
+ b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
+
+ if (srs >= mips_srs_max())
+ panic("Shadow register set %d not supported", srs);
+
+ if (cpu_has_veic) {
+ if (board_bind_eic_interrupt)
+ board_bind_eic_interrupt (n, srs);
+ }
+ else if (cpu_has_vint) {
+ /* SRSMap is only defined if shadow sets are implemented */
+ if (mips_srs_max() > 1)
+ change_c0_srsmap (0xf << n*4, srs << n*4);
+ }
+
+ if (srs == 0) {
+ /*
+ * If no shadow set is selected then use the default handler
+ * that does normal register saving and a standard interrupt exit
+ */
+
+ extern char except_vec_vi, except_vec_vi_lui;
+ extern char except_vec_vi_ori, except_vec_vi_end;
+ const int handler_len = &except_vec_vi_end - &except_vec_vi;
+ const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
+ const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
+
+ if (handler_len > VECTORSPACING) {
+ /*
+ * Sigh... panicking won't help as the console
+ * is probably not configured :(
+ */
+ panic ("VECTORSPACING too small");
+ }
+
+ memcpy (b, &except_vec_vi, handler_len);
+ w = (u32 *)(b + lui_offset);
+ *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
+ w = (u32 *)(b + ori_offset);
+ *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
+ flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));
+ }
+ else {
+ /*
+ * In other cases jump directly to the interrupt handler
+ *
+ * It is the handler's responsibility to save registers if required
+ * (e.g. hi/lo) and return from the exception using "eret"
+ */
+ w = (u32 *)b;
+ *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
+ *w = 0;
+ flush_icache_range((unsigned long)b, (unsigned long)(b+8));
+ }
+
+ return (void *)old_handler;
+}
+
+void *set_vi_handler (int n, void *addr)
+{
+ return set_vi_srs_handler (n, addr, 0);
+}
+#endif
+
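A minimal usage sketch of the two interfaces above, under CONFIG_CPU_MIPSR2 and assuming a hypothetical board file and dispatch stub (neither is part of this patch): allocate a shadow set if one is free, otherwise fall back to set 0 and the default register-saving entry path.

static void __init example_vint_setup(void)
{
	extern asmlinkage void my_vi_dispatch(void);	/* hypothetical assembly stub */
	int srs = mips_srs_alloc();			/* -1 when no set is free */

	if (srs < 0)
		srs = 0;	/* set 0 uses the default save/restore wrapper above */

	set_vi_srs_handler(5, (void *) my_vi_dispatch, srs);	/* vector 5 is made up */
}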
/*
* This is used by native signal handling
*/
@@ -912,6 +1149,7 @@ static inline void signal32_init(void)
extern void cpu_cache_init(void);
extern void tlb_init(void);
+extern void flush_tlb_handlers(void);
void __init per_cpu_trap_init(void)
{
@@ -929,15 +1167,32 @@ void __init per_cpu_trap_init(void)
#endif
if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
status_set |= ST0_XX;
- change_c0_status(ST0_CU|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
+ change_c0_status(ST0_CU|ST0_MX|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
+ if (cpu_has_dsp)
+ set_c0_status(ST0_MX);
+
+#ifdef CONFIG_CPU_MIPSR2
+ write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */
+#endif
+
/*
- * Some MIPS CPUs have a dedicated interrupt vector which reduces the
- * interrupt processing overhead. Use it where available.
+ * Interrupt handling.
*/
- if (cpu_has_divec)
- set_c0_cause(CAUSEF_IV);
+ if (cpu_has_veic || cpu_has_vint) {
+ write_c0_ebase (ebase);
+ /* Setting vector spacing enables EI/VI mode */
+ change_c0_intctl (0x3e0, VECTORSPACING);
+ }
+ if (cpu_has_divec) {
+ if (cpu_has_mipsmt) {
+ unsigned int vpflags = dvpe();
+ set_c0_cause(CAUSEF_IV);
+ evpe(vpflags);
+ } else
+ set_c0_cause(CAUSEF_IV);
+ }
cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
TLBMISS_HANDLER_SETUP();
@@ -951,13 +1206,41 @@ void __init per_cpu_trap_init(void)
tlb_init();
}
+/* Install CPU exception handler */
+void __init set_handler (unsigned long offset, void *addr, unsigned long size)
+{
+ memcpy((void *)(ebase + offset), addr, size);
+ flush_icache_range(ebase + offset, ebase + offset + size);
+}
+
+/* Install uncached CPU exception handler */
+void __init set_uncached_handler (unsigned long offset, void *addr, unsigned long size)
+{
+#ifdef CONFIG_32BIT
+ unsigned long uncached_ebase = KSEG1ADDR(ebase);
+#endif
+#ifdef CONFIG_64BIT
+ unsigned long uncached_ebase = TO_UNCAC(ebase);
+#endif
+
+ memcpy((void *)(uncached_ebase + offset), addr, size);
+}
+
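For reference, trap_init() below uses set_handler() exactly as sketched here; only the wrapper function is invented for illustration.

static void __init install_general_exception_vector(void)	/* illustrative wrapper */
{
	extern char except_vec3_generic;

	/* copy the 0x80-byte generic stub to ebase + 0x180 and flush the I-cache */
	set_handler(0x180, &except_vec3_generic, 0x80);
}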
void __init trap_init(void)
{
extern char except_vec3_generic, except_vec3_r4000;
- extern char except_vec_ejtag_debug;
extern char except_vec4;
unsigned long i;
+ if (cpu_has_veic || cpu_has_vint)
+ ebase = (unsigned long) alloc_bootmem_low_pages (0x200 + VECTORSPACING*64);
+ else
+ ebase = CAC_BASE;
+
+#ifdef CONFIG_CPU_MIPSR2
+ mips_srs_init();
+#endif
+
per_cpu_trap_init();
/*
@@ -965,7 +1248,7 @@ void __init trap_init(void)
* This will be overriden later as suitable for a particular
* configuration.
*/
- memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
+ set_handler(0x180, &except_vec3_generic, 0x80);
/*
* Setup default vectors
@@ -977,8 +1260,8 @@ void __init trap_init(void)
* Copy the EJTAG debug exception vector handler code to it's final
* destination.
*/
- if (cpu_has_ejtag)
- memcpy((void *)(CAC_BASE + 0x300), &except_vec_ejtag_debug, 0x80);
+ if (cpu_has_ejtag && board_ejtag_handler_setup)
+ board_ejtag_handler_setup ();
/*
* Only some CPUs have the watch exceptions.
@@ -987,11 +1270,15 @@ void __init trap_init(void)
set_except_vector(23, handle_watch);
/*
- * Some MIPS CPUs have a dedicated interrupt vector which reduces the
- * interrupt processing overhead. Use it where available.
+ * Initialise interrupt handlers
*/
- if (cpu_has_divec)
- memcpy((void *)(CAC_BASE + 0x200), &except_vec4, 0x8);
+ if (cpu_has_veic || cpu_has_vint) {
+ int nvec = cpu_has_veic ? 64 : 8;
+ for (i = 0; i < nvec; i++)
+ set_vi_handler (i, NULL);
+ }
+ else if (cpu_has_divec)
+ set_handler(0x200, &except_vec4, 0x8);
/*
* Some CPUs can enable/disable for cache parity detection, but does
@@ -1023,21 +1310,6 @@ void __init trap_init(void)
set_except_vector(11, handle_cpu);
set_except_vector(12, handle_ov);
set_except_vector(13, handle_tr);
- set_except_vector(22, handle_mdmx);
-
- if (cpu_has_fpu && !cpu_has_nofpuex)
- set_except_vector(15, handle_fpe);
-
- if (cpu_has_mcheck)
- set_except_vector(24, handle_mcheck);
-
- if (cpu_has_vce)
- /* Special exception: R4[04]00 uses also the divec space. */
- memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
- else if (cpu_has_4kex)
- memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
- else
- memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);
if (current_cpu_data.cputype == CPU_R6000 ||
current_cpu_data.cputype == CPU_R6000A) {
@@ -1053,10 +1325,37 @@ void __init trap_init(void)
//set_except_vector(15, handle_ndc);
}
+
+ if (board_nmi_handler_setup)
+ board_nmi_handler_setup();
+
+ if (cpu_has_fpu && !cpu_has_nofpuex)
+ set_except_vector(15, handle_fpe);
+
+ set_except_vector(22, handle_mdmx);
+
+ if (cpu_has_mcheck)
+ set_except_vector(24, handle_mcheck);
+
+ if (cpu_has_mipsmt)
+ set_except_vector(25, handle_mt);
+
+ if (cpu_has_dsp)
+ set_except_vector(26, handle_dsp);
+
+ if (cpu_has_vce)
+ /* Special exception: R4[04]00 uses also the divec space. */
+ memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
+ else if (cpu_has_4kex)
+ memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
+ else
+ memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);
+
signal_init();
#ifdef CONFIG_MIPS32_COMPAT
signal32_init();
#endif
- flush_icache_range(CAC_BASE, CAC_BASE + 0x400);
+ flush_icache_range(ebase, ebase + 0x400);
+ flush_tlb_handlers();
}
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 36c5212e0928..5b5a3736cbbc 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -94,7 +94,7 @@ unsigned long unaligned_instructions;
#endif
static inline int emulate_load_store_insn(struct pt_regs *regs,
- void *addr, unsigned long pc,
+ void __user *addr, unsigned int __user *pc,
unsigned long **regptr, unsigned long *newvalue)
{
union mips_instruction insn;
@@ -107,7 +107,7 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
/*
* This load never faults.
*/
- __get_user(insn.word, (unsigned int *)pc);
+ __get_user(insn.word, pc);
switch (insn.i_format.opcode) {
/*
@@ -494,8 +494,8 @@ asmlinkage void do_ade(struct pt_regs *regs)
{
unsigned long *regptr, newval;
extern int do_dsemulret(struct pt_regs *);
+ unsigned int __user *pc;
mm_segment_t seg;
- unsigned long pc;
/*
* Address errors may be deliberately induced by the FPU emulator to
@@ -515,7 +515,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
goto sigbus;
- pc = exception_epc(regs);
+ pc = (unsigned int __user *) exception_epc(regs);
if ((current->thread.mflags & MF_FIXADE) == 0)
goto sigbus;
@@ -526,7 +526,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
seg = get_fs();
if (!user_mode(regs))
set_fs(KERNEL_DS);
- if (!emulate_load_store_insn(regs, (void *)regs->cp0_badvaddr, pc,
+ if (!emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc,
&regptr, &newval)) {
compute_return_epc(regs);
/*
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 482ac310c937..25cc856d8e7e 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -54,13 +54,6 @@ SECTIONS
*(.data)
- /* Align the initial ramdisk image (INITRD) on page boundaries. */
- . = ALIGN(4096);
- __rd_start = .;
- *(.initrd)
- . = ALIGN(4096);
- __rd_end = .;
-
CONSTRUCTORS
}
_gp = . + 0x8000;
@@ -96,12 +89,6 @@ SECTIONS
.init.setup : { *(.init.setup) }
__setup_end = .;
- .early_initcall.init : {
- __earlyinitcall_start = .;
- *(.initcall.early1.init)
- }
- __earlyinitcall_end = .;
-
__initcall_start = .;
.initcall.init : {
*(.initcall1.init)
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
new file mode 100644
index 000000000000..97fefcc9dbe7
--- /dev/null
+++ b/arch/mips/kernel/vpe.c
@@ -0,0 +1,1296 @@
+/*
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ */
+
+/*
+ * VPE support module
+ *
+ * Provides support for loading a MIPS SP program on VPE1.
+ * The SP environment is rather simple: no TLBs. It needs to be relocatable
+ * (or partially linked). You should initialise your stack in the startup
+ * code. This loader looks for the symbol __start and sets up
+ * execution to resume from there. The MIPS SDE kit contains suitable examples.
+ *
+ * To load and run, simply cat a SP 'program file' to /dev/vpe1.
+ * i.e. cat spapp >/dev/vpe1.
+ *
+ * You'll need to have the following device files.
+ * mknod /dev/vpe0 c 63 0
+ * mknod /dev/vpe1 c 63 1
+ */
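A hedged C equivalent of the "cat spapp >/dev/vpe1" example above (illustrative only; the program path is made up and error handling is minimal). The header comment implies the load-and-run step is kicked off once the whole image has been written and the device is closed.

#include <fcntl.h>
#include <unistd.h>

int load_sp_program(const char *path)		/* e.g. "./spapp" */
{
	char buf[4096];
	ssize_t n;
	int src = open(path, O_RDONLY);
	int vpe = open("/dev/vpe1", O_WRONLY);	/* minor 1 == VPE1, see mknod above */

	if (src < 0 || vpe < 0)
		return -1;

	while ((n = read(src, buf, sizeof(buf))) > 0)
		write(vpe, buf, n);

	close(src);
	return close(vpe);			/* loader takes over from here */
}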
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <asm/uaccess.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+#include <linux/elf.h>
+#include <linux/seq_file.h>
+#include <linux/syscalls.h>
+#include <linux/moduleloader.h>
+#include <linux/interrupt.h>
+#include <linux/poll.h>
+#include <linux/bootmem.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/cacheflush.h>
+#include <asm/atomic.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+
+typedef void *vpe_handle;
+
+// defined here because the kernel module loader doesn't have
+// anything to do with it.
+#define SHN_MIPS_SCOMMON 0xff03
+
+#ifndef ARCH_SHF_SMALL
+#define ARCH_SHF_SMALL 0
+#endif
+
+/* If this is set, the section belongs in the init part of the module */
+#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
+
+// temporary major number
+#define VPE_MAJOR 63
+
+static char module_name[] = "vpe";
+static int major = 0;
+
+/* grab the likely amount of memory we will need. */
+#ifdef CONFIG_MIPS_VPE_LOADER_TOM
+#define P_SIZE (2 * 1024 * 1024)
+#else
+/* add an overhead to the max kmalloc size for non-stripped symbols/etc. */
+#define P_SIZE (256 * 1024)
+#endif
+
+#define MAX_VPES 16
+
+enum vpe_state {
+ VPE_STATE_UNUSED = 0,
+ VPE_STATE_INUSE,
+ VPE_STATE_RUNNING
+};
+
+enum tc_state {
+ TC_STATE_UNUSED = 0,
+ TC_STATE_INUSE,
+ TC_STATE_RUNNING,
+ TC_STATE_DYNAMIC
+};
+
+struct vpe;
+typedef struct tc {
+ enum tc_state state;
+ int index;
+
+ /* parent VPE */
+ struct vpe *pvpe;
+
+ /* The list of TC's with this VPE */
+ struct list_head tc;
+
+ /* The global list of tc's */
+ struct list_head list;
+} tc_t;
+
+typedef struct vpe {
+ enum vpe_state state;
+
+ /* (device) minor associated with this vpe */
+ int minor;
+
+ /* elfloader stuff */
+ void *load_addr;
+ u32 len;
+ char *pbuffer;
+ u32 plen;
+
+ unsigned long __start;
+
+ /* tc's associated with this vpe */
+ struct list_head tc;
+
+ /* The list of vpe's */
+ struct list_head list;
+
+ /* shared symbol address */
+ void *shared_ptr;
+} vpe_t;
+
+struct vpecontrol_ {
+ /* Virtual processing elements */
+ struct list_head vpe_list;
+
+ /* Thread contexts */
+ struct list_head tc_list;
+} vpecontrol;
+
+static void release_progmem(void *ptr);
+static void dump_vpe(vpe_t * v);
+extern void save_gp_address(unsigned int secbase, unsigned int rel);
+
+/* get the vpe associated with this minor */
+struct vpe *get_vpe(int minor)
+{
+ struct vpe *v;
+
+ list_for_each_entry(v, &vpecontrol.vpe_list, list) {
+ if (v->minor == minor)
+ return v;
+ }
+
+ printk(KERN_DEBUG "VPE: get_vpe minor %d not found\n", minor);
+ return NULL;
+}
+
+/* get the tc associated with this index */
+struct tc *get_tc(int index)
+{
+ struct tc *t;
+
+ list_for_each_entry(t, &vpecontrol.tc_list, list) {
+ if (t->index == index)
+ return t;
+ }
+
+ printk(KERN_DEBUG "VPE: get_tc index %d not found\n", index);
+
+ return NULL;
+}
+
+struct tc *get_tc_unused(void)
+{
+ struct tc *t;
+
+ list_for_each_entry(t, &vpecontrol.tc_list, list) {
+ if (t->state == TC_STATE_UNUSED)
+ return t;
+ }
+
+ printk(KERN_DEBUG "VPE: All TC's are in use\n");
+
+ return NULL;
+}
+
+/* allocate a vpe and associate it with this minor (or index) */
+struct vpe *alloc_vpe(int minor)
+{
+ struct vpe *v;
+
+ if ((v = kmalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) {
+ printk(KERN_WARNING "VPE: alloc_vpe no mem\n");
+ return NULL;
+ }
+
+ memset(v, 0, sizeof(struct vpe));
+
+ INIT_LIST_HEAD(&v->tc);
+ list_add_tail(&v->list, &vpecontrol.vpe_list);
+
+ v->minor = minor;
+ return v;
+}
+
+/* allocate a tc. At startup only tc0 is running, all others can be halted. */
+struct tc *alloc_tc(int index)
+{
+ struct tc *t;
+
+ if ((t = kmalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) {
+ printk(KERN_WARNING "VPE: alloc_tc no mem\n");
+ return NULL;
+ }
+
+ memset(t, 0, sizeof(struct tc));
+
+ INIT_LIST_HEAD(&t->tc);
+ list_add_tail(&t->list, &vpecontrol.tc_list);
+
+ t->index = index;
+
+ return t;
+}
+
+/* clean up and free everything */
+void release_vpe(struct vpe *v)
+{
+ list_del(&v->list);
+ if (v->load_addr)
+ release_progmem(v);
+ kfree(v);
+}
+
+void dump_mtregs(void)
+{
+ unsigned long val;
+
+ val = read_c0_config3();
+ printk("config3 0x%lx MT %ld\n", val,
+ (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT);
+
+ val = read_c0_mvpconf0();
+ printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
+ (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
+ val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
+
+ val = read_c0_mvpcontrol();
+ printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val,
+ (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT,
+ (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT,
+ (val & MVPCONTROL_EVP));
+
+ val = read_c0_vpeconf0();
+ printk("VPEConf0 0x%lx MVP %ld\n", val,
+ (val & VPECONF0_MVP) >> VPECONF0_MVP_SHIFT);
+}
+
+/* Find some VPE program space */
+static void *alloc_progmem(u32 len)
+{
+#ifdef CONFIG_MIPS_VPE_LOADER_TOM
+ /* this means you must tell Linux (e.g. via the mem= boot parameter) to use less memory than you physically have */
+ return (void *)((max_pfn * PAGE_SIZE) + KSEG0);
+#else
+ // simply grab some mem for now
+ return kmalloc(len, GFP_KERNEL);
+#endif
+}
+
+static void release_progmem(void *ptr)
+{
+#ifndef CONFIG_MIPS_VPE_LOADER_TOM
+ kfree(ptr);
+#endif
+}
+
+/* Update size with this section: return offset. */
+static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
+{
+ long ret;
+
+ ret = ALIGN(*size, sechdr->sh_addralign ? : 1);
+ *size = ret + sechdr->sh_size;
+ return ret;
+}
+
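A quick worked example of the alignment arithmetic above (values are illustrative, the wrapper exists only for this sketch):

static void get_offset_example(void)	/* illustrative only */
{
	unsigned long size = 0x1234;
	Elf_Shdr s = { .sh_addralign = 16, .sh_size = 0x40 };
	long off = get_offset(&size, &s);

	/* off == 0x1240 (0x1234 rounded up to 16), size == 0x1280 afterwards */
	(void) off;
}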
+/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
+ might -- code, read-only data, read-write data, small data. Tally
+ sizes, and place the offsets into sh_entsize fields: high bit means it
+ belongs in init. */
+static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
+ Elf_Shdr * sechdrs, const char *secstrings)
+{
+ static unsigned long const masks[][2] = {
+ /* NOTE: all executable code must be the first section
+ * in this array; otherwise modify the text_size
+ * finder in the two loops below */
+ {SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL},
+ {SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL},
+ {SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL},
+ {ARCH_SHF_SMALL | SHF_ALLOC, 0}
+ };
+ unsigned int m, i;
+
+ for (i = 0; i < hdr->e_shnum; i++)
+ sechdrs[i].sh_entsize = ~0UL;
+
+ for (m = 0; m < ARRAY_SIZE(masks); ++m) {
+ for (i = 0; i < hdr->e_shnum; ++i) {
+ Elf_Shdr *s = &sechdrs[i];
+
+ // || strncmp(secstrings + s->sh_name, ".init", 5) == 0)
+ if ((s->sh_flags & masks[m][0]) != masks[m][0]
+ || (s->sh_flags & masks[m][1])
+ || s->sh_entsize != ~0UL)
+ continue;
+ s->sh_entsize = get_offset(&mod->core_size, s);
+ }
+
+ if (m == 0)
+ mod->core_text_size = mod->core_size;
+
+ }
+}
+
+
+/* from module-elf32.c, but subverted a little */
+
+struct mips_hi16 {
+ struct mips_hi16 *next;
+ Elf32_Addr *addr;
+ Elf32_Addr value;
+};
+
+static struct mips_hi16 *mips_hi16_list;
+static unsigned int gp_offs, gp_addr;
+
+static int apply_r_mips_none(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ return 0;
+}
+
+static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ int rel;
+
+ if( !(*location & 0xffff) ) {
+ rel = (int)v - gp_addr;
+ }
+ else {
+ /* .sbss + gp(relative) + offset */
+ /* kludge! */
+ rel = (int)(short)((int)v + gp_offs +
+ (int)(short)(*location & 0xffff) - gp_addr);
+ }
+
+ if( (rel > 32768) || (rel < -32768) ) {
+ printk(KERN_ERR
+ "apply_r_mips_gprel16: relative address out of range 0x%x %d\n",
+ rel, rel);
+ return -ENOEXEC;
+ }
+
+ *location = (*location & 0xffff0000) | (rel & 0xffff);
+
+ return 0;
+}
+
+static int apply_r_mips_pc16(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ int rel;
+ rel = (((unsigned int)v - (unsigned int)location));
+ rel >>= 2; // because the offset is in _instructions_ not bytes.
+ rel -= 1; // and one instruction less due to the branch delay slot.
+
+ if( (rel > 32768) || (rel < -32768) ) {
+ printk(KERN_ERR
+ "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
+ return -ENOEXEC;
+ }
+
+ *location = (*location & 0xffff0000) | (rel & 0xffff);
+
+ return 0;
+}
+
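A worked example of the PC16 arithmetic above (addresses are illustrative): for a branch word at 0x1000 whose target symbol resolves to 0x1020, rel starts as 0x20 bytes, becomes 8 instructions after the shift, and 7 after the delay-slot adjustment; at run time the CPU adds 7*4 to the delay-slot PC (0x1004) and lands back on 0x1020.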
+static int apply_r_mips_32(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ *location += v;
+
+ return 0;
+}
+
+static int apply_r_mips_26(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ if (v % 4) {
+ printk(KERN_ERR "module %s: dangerous relocation mod4\n", me->name);
+ return -ENOEXEC;
+ }
+
+/* Not desperately convinced this is a good check of an overflow condition
+ anyway. But it gets in the way of handling undefined weak symbols which
+ we want to set to zero.
+ if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+ printk(KERN_ERR
+ "module %s: relocation overflow\n",
+ me->name);
+ return -ENOEXEC;
+ }
+*/
+
+ *location = (*location & ~0x03ffffff) |
+ ((*location + (v >> 2)) & 0x03ffffff);
+ return 0;
+}
+
+static int apply_r_mips_hi16(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ struct mips_hi16 *n;
+
+ /*
+ * We cannot relocate this one now because we don't know the value of
+ * the carry we need to add. Save the information, and let LO16 do the
+ * actual relocation.
+ */
+ n = kmalloc(sizeof *n, GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+
+ n->addr = location;
+ n->value = v;
+ n->next = mips_hi16_list;
+ mips_hi16_list = n;
+
+ return 0;
+}
+
+static int apply_r_mips_lo16(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ unsigned long insnlo = *location;
+ Elf32_Addr val, vallo;
+
+ /* Sign extend the addend we extract from the lo insn. */
+ vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
+
+ if (mips_hi16_list != NULL) {
+ struct mips_hi16 *l;
+
+ l = mips_hi16_list;
+ while (l != NULL) {
+ struct mips_hi16 *next;
+ unsigned long insn;
+
+ /*
+ * The value for the HI16 had best be the same.
+ */
+ if (v != l->value) {
+ printk("%d != %d\n", v, l->value);
+ goto out_danger;
+ }
+
+
+ /*
+ * Do the HI16 relocation. Note that we actually don't
+ * need to know anything about the LO16 itself, except
+ * where to find the low 16 bits of the addend needed
+ * by the LO16.
+ */
+ insn = *l->addr;
+ val = ((insn & 0xffff) << 16) + vallo;
+ val += v;
+
+ /*
+ * Account for the sign extension that will happen in
+ * the low bits.
+ */
+ val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
+
+ insn = (insn & ~0xffff) | val;
+ *l->addr = insn;
+
+ next = l->next;
+ kfree(l);
+ l = next;
+ }
+
+ mips_hi16_list = NULL;
+ }
+
+ /*
+ * Ok, we're done with the HI16 relocs. Now deal with the LO16.
+ */
+ val = v + vallo;
+ insnlo = (insnlo & ~0xffff) | (val & 0xffff);
+ *location = insnlo;
+
+ return 0;
+
+out_danger:
+ printk(KERN_ERR "module %s: dangerous " "relocation\n", me->name);
+
+ return -ENOEXEC;
+}
+
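A worked example of the carry handling above (values are illustrative): take a symbol value v = 0x00018000 with zero addends in both instructions. The LO16 half is 0x8000, which the CPU sign-extends at run time, so the HI16 half must be bumped: ((v >> 16) + ((v & 0x8000) != 0)) gives 0x0002, and 0x00020000 + (short)0x8000 is indeed 0x00018000. This is why HI16 relocations are queued on mips_hi16_list and only resolved once the matching LO16 arrives.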
+static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
+ Elf32_Addr v) = {
+ [R_MIPS_NONE] = apply_r_mips_none,
+ [R_MIPS_32] = apply_r_mips_32,
+ [R_MIPS_26] = apply_r_mips_26,
+ [R_MIPS_HI16] = apply_r_mips_hi16,
+ [R_MIPS_LO16] = apply_r_mips_lo16,
+ [R_MIPS_GPREL16] = apply_r_mips_gprel16,
+ [R_MIPS_PC16] = apply_r_mips_pc16
+};
+
+
+int apply_relocations(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ uint32_t *location;
+ unsigned int i;
+ Elf32_Addr v;
+ int res;
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ Elf32_Word r_info = rel[i].r_info;
+
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ /* This is the symbol it is referring to */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(r_info);
+
+ if (!sym->st_value) {
+ printk(KERN_DEBUG "%s: undefined weak symbol %s\n",
+ me->name, strtab + sym->st_name);
+ /* just print the warning, don't barf */
+ }
+
+ v = sym->st_value;
+
+ res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
+ if( res ) {
+ printk(KERN_DEBUG
+ "relocation error 0x%x sym refer <%s> value 0x%x "
+ "type 0x%x r_info 0x%x\n",
+ (unsigned int)location, strtab + sym->st_name, v,
+ r_info, ELF32_R_TYPE(r_info));
+ }
+
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+void save_gp_address(unsigned int secbase, unsigned int rel)
+{
+ gp_addr = secbase + rel;
+ gp_offs = gp_addr - (secbase & 0xffff0000);
+}
+/* end module-elf32.c */
+
+
+
+/* Change all symbols so that sh_value encodes the pointer directly. */
+static int simplify_symbols(Elf_Shdr * sechdrs,
+ unsigned int symindex,
+ const char *strtab,
+ const char *secstrings,
+ unsigned int nsecs, struct module *mod)
+{
+ Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
+ unsigned long secbase, bssbase = 0;
+ unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
+ int ret = 0, size;
+
+ /* find the .bss section for COMMON symbols */
+ for (i = 0; i < nsecs; i++) {
+ if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0)
+ bssbase = sechdrs[i].sh_addr;
+ }
+
+ for (i = 1; i < n; i++) {
+ switch (sym[i].st_shndx) {
+ case SHN_COMMON:
+ /* Allocate space for the symbol in the .bss section. st_value currently
+ holds the size; we want it to hold the address of the symbol. */
+
+ size = sym[i].st_value;
+ sym[i].st_value = bssbase;
+
+ bssbase += size;
+ break;
+
+ case SHN_ABS:
+ /* Don't need to do anything */
+ break;
+
+ case SHN_UNDEF:
+ /* ret = -ENOENT; */
+ break;
+
+ case SHN_MIPS_SCOMMON:
+
+ printk(KERN_DEBUG
+ "simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n",
+ strtab + sym[i].st_name, sym[i].st_shndx);
+
+ // .sbss section
+ break;
+
+ default:
+ secbase = sechdrs[sym[i].st_shndx].sh_addr;
+
+ if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) {
+ save_gp_address(secbase, sym[i].st_value);
+ }
+
+ sym[i].st_value += secbase;
+ break;
+ }
+
+ }
+
+ return ret;
+}
+
+#ifdef DEBUG_ELFLOADER
+static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
+ const char *strtab, struct module *mod)
+{
+ Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
+ unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
+
+ printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n);
+ for (i = 1; i < n; i++) {
+ printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i,
+ strtab + sym[i].st_name, sym[i].st_value);
+ }
+}
+#endif
+
+static void dump_tc(struct tc *t)
+{
+ printk(KERN_WARNING "VPE: TC index %d TCStatus 0x%lx halt 0x%lx\n",
+ t->index, read_tc_c0_tcstatus(), read_tc_c0_tchalt());
+ printk(KERN_WARNING "VPE: tcrestart 0x%lx\n", read_tc_c0_tcrestart());
+}
+
+static void dump_tclist(void)
+{
+ struct tc *t;
+
+ list_for_each_entry(t, &vpecontrol.tc_list, list) {
+ dump_tc(t);
+ }
+}
+
+/* We are prepared so configure and start the VPE... */
+int vpe_run(vpe_t * v)
+{
+ unsigned long val;
+ struct tc *t;
+
+ /* check we are the Master VPE */
+ val = read_c0_vpeconf0();
+ if (!(val & VPECONF0_MVP)) {
+ printk(KERN_WARNING
+ "VPE: only Master VPE's are allowed to configure MT\n");
+ return -1;
+ }
+
+ /* disable MT (using dvpe) */
+ dvpe();
+
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ if (!list_empty(&v->tc)) {
+ if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
+ printk(KERN_WARNING "VPE: TC %d is already in use.\n",
+ t->index);
+ return -ENOEXEC;
+ }
+ } else {
+ printk(KERN_WARNING "VPE: No TC's associated with VPE %d\n",
+ v->minor);
+ return -ENOEXEC;
+ }
+
+ settc(t->index);
+
+ val = read_vpe_c0_vpeconf0();
+
+ /* should check it is halted, and not activated */
+ if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
+ printk(KERN_WARNING "VPE: TC %d is already doing something!\n",
+ t->index);
+
+ dump_tclist();
+ return -ENOEXEC;
+ }
+
+ /* Write the address we want it to start running from into the TCPC register. */
+ write_tc_c0_tcrestart((unsigned long)v->__start);
+
+ /* write the sivc_info address to tccontext */
+ write_tc_c0_tccontext((unsigned long)0);
+
+ /* Set up the XTC bit in vpeconf0 to point at our tc */
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (t->index << VPECONF0_XTC_SHIFT));
+
+ /* mark the TC as activated, not interrupt exempt and not dynamically allocatable */
+ val = read_tc_c0_tcstatus();
+ val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
+ write_tc_c0_tcstatus(val);
+
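+	/* clear the halt bit so the TC can run once the VPE is activated */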
+ write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
+
+ /* set up VPE1 */
+	/* no multiple TCs */
+	write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
+
+	/* enable this VPE */
+	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
+
+ /*
+ * The sde-kit passes 'memsize' to __start in $a3, so set something
+ * here...
+ * Or set $a3 (register 7) to zero and define DFLT_STACK_SIZE and
+ * DFLT_HEAP_SIZE when you compile your program
+ */
+
+ mttgpr(7, 0);
+
+ /* set config to be the same as vpe0, particularly kseg0 coherency alg */
+ write_vpe_c0_config(read_c0_config());
+
+ /* clear out any left overs from a previous program */
+ write_vpe_c0_cause(0);
+
+ /* take system out of configuration state */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ /* clear interrupts enabled IE, ERL, EXL, and KSU from c0 status */
+ write_vpe_c0_status(read_vpe_c0_status() & ~(ST0_ERL | ST0_KSU | ST0_IE | ST0_EXL));
+
+ /* set it running */
+ evpe(EVPE_ENABLE);
+
+ return 0;
+}
+
+static int find_vpe_symbols(vpe_t * v, Elf_Shdr * sechdrs,
+			    unsigned int symindex, const char *strtab,
+			    struct module *mod)
+{
+	Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
+	unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
+
+	for (i = 1; i < n; i++) {
+		if (strcmp(strtab + sym[i].st_name, "__start") == 0)
+			v->__start = sym[i].st_value;
+
+		if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0)
+			v->shared_ptr = (void *)sym[i].st_value;
+	}
+
+	/* the caller treats a negative return as "symbols missing"; return
+	   an error instead of unconditional success so that check works */
+	if (v->__start == 0 || v->shared_ptr == NULL)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Allocates a VPE with some program code space (the load address), copies
+ * the contents of the program (p)buffer, performing relocations/etc.,
+ * and frees it when finished.
+ */
+int vpe_elfload(vpe_t * v)
+{
+ Elf_Ehdr *hdr;
+ Elf_Shdr *sechdrs;
+ long err = 0;
+ char *secstrings, *strtab = NULL;
+ unsigned int len, i, symindex = 0, strindex = 0;
+
+	struct module mod;	/* so we can re-use the relocations code */
+
+ memset(&mod, 0, sizeof(struct module));
+ strcpy(mod.name, "VPE dummy prog module");
+
+ hdr = (Elf_Ehdr *) v->pbuffer;
+ len = v->plen;
+
+ /* Sanity checks against insmoding binaries or wrong arch,
+ weird elf version */
+ if (memcmp(hdr->e_ident, ELFMAG, 4) != 0
+ || hdr->e_type != ET_REL || !elf_check_arch(hdr)
+ || hdr->e_shentsize != sizeof(*sechdrs)) {
+ printk(KERN_WARNING
+ "VPE program, wrong arch or weird elf version\n");
+
+ return -ENOEXEC;
+ }
+
+ if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
+ printk(KERN_ERR "VPE program length %u truncated\n", len);
+ return -ENOEXEC;
+ }
+
+ /* Convenience variables */
+ sechdrs = (void *)hdr + hdr->e_shoff;
+ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ sechdrs[0].sh_addr = 0;
+
+ /* And these should exist, but gcc whinges if we don't init them */
+ symindex = strindex = 0;
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+
+ if (sechdrs[i].sh_type != SHT_NOBITS
+ && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) {
+ printk(KERN_ERR "VPE program length %u truncated\n",
+ len);
+ return -ENOEXEC;
+ }
+
+ /* Mark all sections sh_addr with their address in the
+ temporary image. */
+ sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
+
+ /* Internal symbols and strings. */
+ if (sechdrs[i].sh_type == SHT_SYMTAB) {
+ symindex = i;
+ strindex = sechdrs[i].sh_link;
+ strtab = (char *)hdr + sechdrs[strindex].sh_offset;
+ }
+ }
+
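+	/* work out the final image size and give each SHF_ALLOC section an
+	   offset within it (recorded in sh_entsize) */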
+ layout_sections(&mod, hdr, sechdrs, secstrings);
+
+	v->load_addr = alloc_progmem(mod.core_size);
+	if (v->load_addr == NULL) {
+		printk(KERN_WARNING "VPE: unable to allocate program memory\n");
+		return -ENOMEM;
+	}
+	memset(v->load_addr, 0, mod.core_size);
+
+	printk(KERN_DEBUG "VPE elf_loader: loading to %p\n", v->load_addr);
+
+ for (i = 0; i < hdr->e_shnum; i++) {
+ void *dest;
+
+ if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+ continue;
+
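+		/* sh_entsize holds this section's offset within the image,
+		   as assigned by layout_sections() above */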
+ dest = v->load_addr + sechdrs[i].sh_entsize;
+
+ if (sechdrs[i].sh_type != SHT_NOBITS)
+ memcpy(dest, (void *)sechdrs[i].sh_addr,
+ sechdrs[i].sh_size);
+ /* Update sh_addr to point to copy in image. */
+ sechdrs[i].sh_addr = (unsigned long)dest;
+ }
+
+ /* Fix up syms, so that st_value is a pointer to location. */
+	err = simplify_symbols(sechdrs, symindex, strtab, secstrings,
+			       hdr->e_shnum, &mod);
+ if (err < 0) {
+ printk(KERN_WARNING "VPE: unable to simplify symbols\n");
+ goto cleanup;
+ }
+
+ /* Now do relocations. */
+ for (i = 1; i < hdr->e_shnum; i++) {
+ const char *strtab = (char *)sechdrs[strindex].sh_addr;
+ unsigned int info = sechdrs[i].sh_info;
+
+ /* Not a valid relocation section? */
+ if (info >= hdr->e_shnum)
+ continue;
+
+ /* Don't bother with non-allocated sections */
+ if (!(sechdrs[info].sh_flags & SHF_ALLOC))
+ continue;
+
+		if (sechdrs[i].sh_type == SHT_REL)
+			err = apply_relocations(sechdrs, strtab, symindex, i,
+						&mod);
+		else if (sechdrs[i].sh_type == SHT_RELA)
+			err = apply_relocate_add(sechdrs, strtab, symindex, i,
+						 &mod);
+ if (err < 0) {
+ printk(KERN_WARNING
+ "vpe_elfload: error in relocations err %ld\n",
+ err);
+ goto cleanup;
+ }
+ }
+
+ /* make sure it's physically written out */
+ flush_icache_range((unsigned long)v->load_addr,
+ (unsigned long)v->load_addr + v->len);
+
+	if (find_vpe_symbols(v, sechdrs, symindex, strtab, &mod) < 0) {
+		printk(KERN_WARNING
+		       "VPE: program doesn't contain __start or vpe_shared symbols\n");
+		err = -ENOEXEC;
+		goto cleanup;
+	}
+
+	printk(KERN_INFO "VPE: elf loaded\n");
+
+cleanup:
+ return err;
+}
+
+static void dump_vpe(vpe_t * v)
+{
+ struct tc *t;
+
+ printk(KERN_DEBUG "VPEControl 0x%lx\n", read_vpe_c0_vpecontrol());
+ printk(KERN_DEBUG "VPEConf0 0x%lx\n", read_vpe_c0_vpeconf0());
+
+ list_for_each_entry(t, &vpecontrol.tc_list, list) {
+ dump_tc(t);
+ }
+}
+
+/* check the VPE is unused and get it ready to load a program */
+static int vpe_open(struct inode *inode, struct file *filp)
+{
+ int minor;
+ vpe_t *v;
+
+ /* assume only 1 device at the mo. */
+ if ((minor = MINOR(inode->i_rdev)) != 1) {
+ printk(KERN_WARNING "VPE: only vpe1 is supported\n");
+ return -ENODEV;
+ }
+
+ if ((v = get_vpe(minor)) == NULL) {
+ printk(KERN_WARNING "VPE: unable to get vpe\n");
+ return -ENODEV;
+ }
+
+ if (v->state != VPE_STATE_UNUSED) {
+ unsigned long tmp;
+ struct tc *t;
+
+ printk(KERN_WARNING "VPE: device %d already in use\n", minor);
+
+ dvpe();
+ dump_vpe(v);
+
+ printk(KERN_WARNING "VPE: re-initialising %d\n", minor);
+
+ release_progmem(v->load_addr);
+
+ t = get_tc(minor);
+ settc(minor);
+ tmp = read_tc_c0_tcstatus();
+
+ /* mark not allocated and not dynamically allocatable */
+ tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+ tmp |= TCSTATUS_IXMT; /* interrupt exempt */
+ write_tc_c0_tcstatus(tmp);
+
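+		/* halt the TC so it is safe to load a new program onto it */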
+ write_tc_c0_tchalt(TCHALT_H);
+
+ }
+
+	/* this of course trashes what was there before... */
+	v->pbuffer = vmalloc(P_SIZE);
+	if (v->pbuffer == NULL) {
+		printk(KERN_ERR "VPE: buffer allocation failed\n");
+		return -ENOMEM;
+	}
+	v->plen = P_SIZE;
+
+	/* mark it in use so when we get write ops we know they're expected */
+	v->state = VPE_STATE_INUSE;
+ v->load_addr = NULL;
+ v->len = 0;
+
+ return 0;
+}
+
+static int vpe_release(struct inode *inode, struct file *filp)
+{
+ int minor, ret = 0;
+ vpe_t *v;
+ Elf_Ehdr *hdr;
+
+ minor = MINOR(inode->i_rdev);
+ if ((v = get_vpe(minor)) == NULL)
+ return -ENODEV;
+
+	/* simple case of fire and forget, so tell the VPE to run... */
+
+ hdr = (Elf_Ehdr *) v->pbuffer;
+ if (memcmp(hdr->e_ident, ELFMAG, 4) == 0) {
+ if (vpe_elfload(v) >= 0)
+ vpe_run(v);
+ else {
+ printk(KERN_WARNING "VPE: ELF load failed.\n");
+ ret = -ENOEXEC;
+ }
+ } else {
+ printk(KERN_WARNING "VPE: only elf files are supported\n");
+ ret = -ENOEXEC;
+ }
+
+	/* cleanup any temp buffers */
+ if (v->pbuffer)
+ vfree(v->pbuffer);
+ v->plen = 0;
+ return ret;
+}
+
+static ssize_t vpe_write(struct file *file, const char __user * buffer,
+ size_t count, loff_t * ppos)
+{
+ int minor;
+ size_t ret = count;
+ vpe_t *v;
+
+ minor = MINOR(file->f_dentry->d_inode->i_rdev);
+ if ((v = get_vpe(minor)) == NULL)
+ return -ENODEV;
+
+ if (v->pbuffer == NULL) {
+ printk(KERN_ERR "vpe_write: no pbuffer\n");
+ return -ENOMEM;
+ }
+
+	if ((count + v->len) > v->plen) {
+		printk(KERN_WARNING
+		       "VPE Loader: elf size too big. Perhaps strip unneeded symbols\n");
+		return -ENOMEM;
+	}
+
+	count -= copy_from_user(v->pbuffer + v->len, buffer, count);
+	if (!count) {
+		printk(KERN_ERR "vpe_write: copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+ v->len += count;
+ return ret;
+}
+
+static struct file_operations vpe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpe_open,
+ .release = vpe_release,
+ .write = vpe_write
+};
+
+/* module wrapper entry points */
+/* give me a vpe */
+vpe_handle vpe_alloc(void)
+{
+ int i;
+ struct vpe *v;
+
+	/* find a vpe; VPE 0 runs the kernel itself, so start at 1 */
+ for (i = 1; i < MAX_VPES; i++) {
+ if ((v = get_vpe(i)) != NULL) {
+ v->state = VPE_STATE_INUSE;
+ return v;
+ }
+ }
+ return NULL;
+}
+
+EXPORT_SYMBOL(vpe_alloc);
+
+/* start running from here */
+int vpe_start(vpe_handle vpe, unsigned long start)
+{
+ struct vpe *v = vpe;
+
+ v->__start = start;
+ return vpe_run(v);
+}
+
+EXPORT_SYMBOL(vpe_start);
+
+/* halt it for now */
+int vpe_stop(vpe_handle vpe)
+{
+ struct vpe *v = vpe;
+ struct tc *t;
+ unsigned int evpe_flags;
+
+ evpe_flags = dvpe();
+
+	/* list_entry() never returns NULL; check for an empty TC list instead */
+	if (!list_empty(&v->tc)) {
+		t = list_entry(v->tc.next, struct tc, tc);
+
+		settc(t->index);
+		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
+	}
+
+ evpe(evpe_flags);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vpe_stop);
+
+/* I've done with it thank you */
+int vpe_free(vpe_handle vpe)
+{
+ struct vpe *v = vpe;
+ struct tc *t;
+ unsigned int evpe_flags;
+
+	/* list_entry() never returns NULL; check for an empty TC list instead */
+	if (list_empty(&v->tc))
+		return -ENOEXEC;
+
+	t = list_entry(v->tc.next, struct tc, tc);
+
+ evpe_flags = dvpe();
+
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ settc(t->index);
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
+
+ /* mark the TC unallocated and halt'ed */
+ write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
+ write_tc_c0_tchalt(TCHALT_H);
+
+ v->state = VPE_STATE_UNUSED;
+
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+ evpe(evpe_flags);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vpe_free);
+
+void *vpe_get_shared(int index)
+{
+ struct vpe *v;
+
+ if ((v = get_vpe(index)) == NULL) {
+ printk(KERN_WARNING "vpe: invalid vpe index %d\n", index);
+ return NULL;
+ }
+
+ return v->shared_ptr;
+}
+
+EXPORT_SYMBOL(vpe_get_shared);
+
+static int __init vpe_module_init(void)
+{
+ struct vpe *v = NULL;
+ struct tc *t;
+ unsigned long val;
+ int i;
+
+	if (!cpu_has_mipsmt) {
+		printk(KERN_WARNING
+		       "VPE loader: not a MIPS MT capable processor\n");
+		return -ENODEV;
+	}
+
+	major = register_chrdev(VPE_MAJOR, module_name, &vpe_fops);
+	if (major < 0) {
+		printk(KERN_WARNING
+		       "VPE loader: unable to register character device\n");
+		return -EBUSY;
+	}
+
+ if (major == 0)
+ major = VPE_MAJOR;
+
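+	/* disable multi-threading and other VPEs while we reconfigure */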
+ dmt();
+ dvpe();
+
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ /* dump_mtregs(); */
+
+ INIT_LIST_HEAD(&vpecontrol.vpe_list);
+ INIT_LIST_HEAD(&vpecontrol.tc_list);
+
+ val = read_c0_mvpconf0();
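+	/* MVPConf0.PTC is the index of the highest TC, so PTC + 1 TCs exist;
+	   walk them all and build up the TC/VPE lists */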
+ for (i = 0; i < ((val & MVPCONF0_PTC) + 1); i++) {
+ t = alloc_tc(i);
+
+ /* VPE's */
+ if (i < ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1) {
+ settc(i);
+
+ if ((v = alloc_vpe(i)) == NULL) {
+ printk(KERN_WARNING "VPE: unable to allocate VPE\n");
+ return -ENODEV;
+ }
+
+ list_add(&t->tc, &v->tc); /* add the tc to the list of this vpe's tc's. */
+
+ /* deactivate all but vpe0 */
+ if (i != 0) {
+ unsigned long tmp = read_vpe_c0_vpeconf0();
+
+ tmp &= ~VPECONF0_VPA;
+
+ /* master VPE */
+ tmp |= VPECONF0_MVP;
+ write_vpe_c0_vpeconf0(tmp);
+ }
+
+ /* disable multi-threading with TC's */
+ write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
+
+ if (i != 0) {
+ write_vpe_c0_status((read_c0_status() &
+ ~(ST0_IM | ST0_IE | ST0_KSU))
+ | ST0_CU0);
+
+ /* set config to be the same as vpe0, particularly kseg0 coherency alg */
+ write_vpe_c0_config(read_c0_config());
+ }
+
+ }
+
+		/* TC's */
+		t->pvpe = v;	/* set the parent vpe */
+
+		/* tc 0 will of course be running.... */
+		if (i == 0)
+			t->state = TC_STATE_RUNNING;
+
+		if (i != 0) {
+			unsigned long tmp;
+
+			settc(i);
+
+			/* bind a TC to each VPE; may as well put all excess
+			   TCs on the last VPE */
+ if (i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1))
+ write_tc_c0_tcbind(read_tc_c0_tcbind() |
+ ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));
+ else
+ write_tc_c0_tcbind(read_tc_c0_tcbind() | i);
+
+ tmp = read_tc_c0_tcstatus();
+
+ /* mark not allocated and not dynamically allocatable */
+ tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+ tmp |= TCSTATUS_IXMT; /* interrupt exempt */
+ write_tc_c0_tcstatus(tmp);
+
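+			/* leave the TC halted until a program is loaded */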
+ write_tc_c0_tchalt(TCHALT_H);
+ }
+ }
+
+ /* release config state */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ return 0;
+}
+
+static void __exit vpe_module_exit(void)
+{
+ struct vpe *v, *n;
+
+ list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
+ if (v->state != VPE_STATE_UNUSED) {
+ release_vpe(v);
+ }
+ }
+
+ unregister_chrdev(major, module_name);
+}
+
+module_init(vpe_module_init);
+module_exit(vpe_module_exit);
+MODULE_DESCRIPTION("MIPS VPE Loader");
+MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc");
+MODULE_LICENSE("GPL");