Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile         |   2
-rw-r--r--  arch/mips/kernel/asm-offsets.c    |   3
-rw-r--r--  arch/mips/kernel/cpu-bugs64.c     |   2
-rw-r--r--  arch/mips/kernel/cpu-probe.c      |  40
-rw-r--r--  arch/mips/kernel/init_task.c      |   5
-rw-r--r--  arch/mips/kernel/kspd.c           |  39
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c  |   2
-rw-r--r--  arch/mips/kernel/mips-mt.c        |  10
-rw-r--r--  arch/mips/kernel/octeon_switch.S  |   3
-rw-r--r--  arch/mips/kernel/r2300_switch.S   |   3
-rw-r--r--  arch/mips/kernel/r4k_switch.S     |   3
-rw-r--r--  arch/mips/kernel/rtlx.c           |  17
-rw-r--r--  arch/mips/kernel/scall32-o32.S    |  74
-rw-r--r--  arch/mips/kernel/scall64-64.S     |  74
-rw-r--r--  arch/mips/kernel/scall64-n32.S    |   2
-rw-r--r--  arch/mips/kernel/scall64-o32.S    |   2
-rw-r--r--  arch/mips/kernel/setup.c          |   2
-rw-r--r--  arch/mips/kernel/smp-cmp.c        |   6
-rw-r--r--  arch/mips/kernel/smp-mt.c         |   6
-rw-r--r--  arch/mips/kernel/smp-up.c         |   3
-rw-r--r--  arch/mips/kernel/smp.c            |  21
-rw-r--r--  arch/mips/kernel/smtc.c           |  21
-rw-r--r--  arch/mips/kernel/syscall.c        | 112
-rw-r--r--  arch/mips/kernel/traps.c          |   5
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S    | 127
-rw-r--r--  arch/mips/kernel/vpe.c            |  79
26 files changed, 289 insertions(+), 374 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index e96122159928..eecd2a9f155c 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -2,6 +2,8 @@
# Makefile for the Linux/MIPS kernel.
#
+CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
+
extra-y := head.o init_task.o vmlinux.lds
obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 8d006ec65677..2c1e1d02338b 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -183,9 +183,6 @@ void output_mm_defines(void)
OFFSET(MM_PGD, mm_struct, pgd);
OFFSET(MM_CONTEXT, mm_struct, context);
BLANK();
- DEFINE(_PAGE_SIZE, PAGE_SIZE);
- DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
- BLANK();
DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
DEFINE(_PMD_T_SIZE, sizeof(pmd_t));
DEFINE(_PTE_T_SIZE, sizeof(pte_t));
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 02b7713cf71c..408d0a07b3a3 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -167,7 +167,7 @@ static inline void check_mult_sh(void)
panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
}
-static volatile int daddi_ov __cpuinitdata = 0;
+static volatile int daddi_ov __cpuinitdata;
asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 1abe9905c9c1..f709657e4dcd 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -31,7 +31,7 @@
* The wait instruction stops the pipeline and reduces the power consumption of
* the CPU very much.
*/
-void (*cpu_wait)(void) = NULL;
+void (*cpu_wait)(void);
static void r3081_wait(void)
{
@@ -91,16 +91,13 @@ static void rm7k_wait_irqoff(void)
local_irq_enable();
}
-/* The Au1xxx wait is available only if using 32khz counter or
- * external timer source, but specifically not CP0 Counter. */
-int allow_au1k_wait;
-
+/*
+ * The Au1xxx wait is available only if using 32khz counter or
+ * external timer source, but specifically not CP0 Counter.
+ * alchemy/common/time.c may override cpu_wait!
+ */
static void au1k_wait(void)
{
- if (!allow_au1k_wait)
- return;
-
- /* using the wait instruction makes CP0 counter unusable */
__asm__(" .set mips3 \n"
" cache 0x14, 0(%0) \n"
" cache 0x14, 32(%0) \n"
@@ -115,7 +112,7 @@ static void au1k_wait(void)
: : "r" (au1k_wait));
}
-static int __initdata nowait = 0;
+static int __initdata nowait;
static int __init wait_disable(char *s)
{
@@ -159,6 +156,9 @@ void __init check_wait(void)
case CPU_25KF:
case CPU_PR4450:
case CPU_BCM3302:
+ case CPU_BCM6338:
+ case CPU_BCM6348:
+ case CPU_BCM6358:
case CPU_CAVIUM_OCTEON:
cpu_wait = r4k_wait;
break;
@@ -857,6 +857,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
decode_configs(c);
switch (c->processor_id & 0xff00) {
case PRID_IMP_BCM3302:
+ /* same as PRID_IMP_BCM6338 */
c->cputype = CPU_BCM3302;
__cpu_name[cpu] = "Broadcom BCM3302";
break;
@@ -864,6 +865,25 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_BCM4710;
__cpu_name[cpu] = "Broadcom BCM4710";
break;
+ case PRID_IMP_BCM6345:
+ c->cputype = CPU_BCM6345;
+ __cpu_name[cpu] = "Broadcom BCM6345";
+ break;
+ case PRID_IMP_BCM6348:
+ c->cputype = CPU_BCM6348;
+ __cpu_name[cpu] = "Broadcom BCM6348";
+ break;
+ case PRID_IMP_BCM4350:
+ switch (c->processor_id & 0xf0) {
+ case PRID_REV_BCM6358:
+ c->cputype = CPU_BCM6358;
+ __cpu_name[cpu] = "Broadcom BCM6358";
+ break;
+ default:
+ c->cputype = CPU_UNKNOWN;
+ break;
+ }
+ break;
}
}
diff --git a/arch/mips/kernel/init_task.c b/arch/mips/kernel/init_task.c
index 5b457a40c784..6d6ca5305895 100644
--- a/arch/mips/kernel/init_task.c
+++ b/arch/mips/kernel/init_task.c
@@ -21,9 +21,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
*
* The things we do for performance..
*/
-union thread_union init_thread_union
- __attribute__((__section__(".data.init_task"),
- __aligned__(THREAD_SIZE))) =
+union thread_union init_thread_union __init_task_data
+ __attribute__((__aligned__(THREAD_SIZE))) =
{ INIT_THREAD_INFO(init_task) };
/*
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index fd6e51224034..ad4e017ed2f3 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -31,7 +31,7 @@
#include <asm/rtlx.h>
#include <asm/kspd.h>
-static struct workqueue_struct *workqueue = NULL;
+static struct workqueue_struct *workqueue;
static struct work_struct work;
extern unsigned long cpu_khz;
@@ -58,7 +58,7 @@ struct mtsp_syscall_generic {
};
static struct list_head kspd_notifylist;
-static int sp_stopping = 0;
+static int sp_stopping;
/* these should match with those in the SDE kit */
#define MTSP_SYSCALL_BASE 0
@@ -172,13 +172,20 @@ static unsigned int translate_open_flags(int flags)
}
-static void sp_setfsuidgid( uid_t uid, gid_t gid)
+static int sp_setfsuidgid(uid_t uid, gid_t gid)
{
- current->cred->fsuid = uid;
- current->cred->fsgid = gid;
+ struct cred *new;
- key_fsuid_changed(current);
- key_fsgid_changed(current);
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ new->fsuid = uid;
+ new->fsgid = gid;
+
+ commit_creds(new);
+
+ return 0;
}
/*
@@ -196,7 +203,7 @@ void sp_work_handle_request(void)
mm_segment_t old_fs;
struct timeval tv;
struct timezone tz;
- int cmd;
+ int err, cmd;
char *vcwd;
int size;
@@ -225,8 +232,11 @@ void sp_work_handle_request(void)
/* Run the syscall at the privilege of the user who loaded the
SP program */
- if (vpe_getuid(tclimit))
- sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit));
+ if (vpe_getuid(tclimit)) {
+ err = sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit));
+ if (!err)
+ pr_err("Change of creds failed\n");
+ }
switch (sc.cmd) {
/* needs the flags argument translating from SDE kit to
@@ -283,8 +293,11 @@ void sp_work_handle_request(void)
break;
} /* switch */
- if (vpe_getuid(tclimit))
- sp_setfsuidgid( 0, 0);
+ if (vpe_getuid(tclimit)) {
+ err = sp_setfsuidgid(0, 0);
+ if (!err)
+ pr_err("restoring old creds failed\n");
+ }
old_fs = get_fs();
set_fs(KERNEL_DS);
@@ -328,7 +341,7 @@ static void sp_cleanup(void)
sys_chdir("/");
}
-static int channel_open = 0;
+static int channel_open;
/* the work handler */
static void sp_work(struct work_struct *unused)
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 42461310b185..cbc6182b0065 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -18,7 +18,7 @@
cpumask_t mt_fpu_cpumask;
static int fpaff_threshold = -1;
-unsigned long mt_fpemul_threshold = 0;
+unsigned long mt_fpemul_threshold;
/*
* Replacement functions for the sys_sched_setaffinity() and
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index d01665a453f5..b2259e7cd829 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -125,10 +125,10 @@ void mips_mt_regdump(unsigned long mvpctl)
local_irq_restore(flags);
}
-static int mt_opt_norps = 0;
+static int mt_opt_norps;
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
-static int mt_opt_forceconfig7 = 0;
+static int mt_opt_forceconfig7;
static int mt_opt_config7 = -1;
static int __init rps_disable(char *s)
@@ -161,8 +161,8 @@ static int __init config7_set(char *str)
__setup("config7=", config7_set);
/* Experimental cache flush control parameters that should go away some day */
-int mt_protiflush = 0;
-int mt_protdflush = 0;
+int mt_protiflush;
+int mt_protdflush;
int mt_n_iflushes = 1;
int mt_n_dflushes = 1;
@@ -194,7 +194,7 @@ static int __init ndflush(char *s)
}
__setup("ndflush=", ndflush);
-static unsigned int itc_base = 0;
+static unsigned int itc_base;
static int __init set_itc_base(char *str)
{
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index d52389672b06..3952b8323efa 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -36,9 +36,6 @@
.align 7
LEAF(resume)
.set arch=octeon
-#ifndef CONFIG_CPU_HAS_LLSC
- sw zero, ll_bit
-#endif
mfc0 t1, CP0_STATUS
LONG_S t1, THREAD_STATUS(a0)
cpu_save_nonscratch a0
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 656bde2e11b1..698414b7a253 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -46,9 +46,6 @@
* struct thread_info *next_ti) )
*/
LEAF(resume)
-#ifndef CONFIG_CPU_HAS_LLSC
- sw zero, ll_bit
-#endif
mfc0 t1, CP0_STATUS
sw t1, THREAD_STATUS(a0)
cpu_save_nonscratch a0
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index d9bfae53c43f..8893ee1a2368 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -45,9 +45,6 @@
*/
.align 5
LEAF(resume)
-#ifndef CONFIG_CPU_HAS_LLSC
- sw zero, ll_bit
-#endif
mfc0 t1, CP0_STATUS
LONG_S t1, THREAD_STATUS(a0)
cpu_save_nonscratch a0
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 4ce93aa7b372..364f066cb497 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -57,7 +57,7 @@ static struct chan_waitqueues {
} channel_wqs[RTLX_CHANNELS];
static struct vpe_notifications notify;
-static int sp_stopping = 0;
+static int sp_stopping;
extern void *vpe_get_shared(int index);
@@ -72,8 +72,9 @@ static void rtlx_dispatch(void)
*/
static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
{
+ unsigned int vpeflags;
+ unsigned long flags;
int i;
- unsigned int flags, vpeflags;
/* Ought not to be strictly necessary for SMTC builds */
local_irq_save(flags);
@@ -392,20 +393,12 @@ out:
static int file_open(struct inode *inode, struct file *filp)
{
- int minor = iminor(inode);
- int err;
-
- lock_kernel();
- err = rtlx_open(minor, (filp->f_flags & O_NONBLOCK) ? 0 : 1);
- unlock_kernel();
- return err;
+ return rtlx_open(iminor(inode), (filp->f_flags & O_NONBLOCK) ? 0 : 1);
}
static int file_release(struct inode *inode, struct file *filp)
{
- int minor = iminor(inode);
-
- return rtlx_release(minor);
+ return rtlx_release(iminor(inode));
}
static unsigned int file_poll(struct file *file, poll_table * wait)
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index b57082123536..fd2a9bb620d6 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -187,78 +187,6 @@ illegal_syscall:
j o32_syscall_exit
END(handle_sys)
- LEAF(mips_atomic_set)
- andi v0, a1, 3 # must be word aligned
- bnez v0, bad_alignment
-
- lw v1, TI_ADDR_LIMIT($28) # in legal address range?
- addiu a0, a1, 4
- or a0, a0, a1
- and a0, a0, v1
- bltz a0, bad_address
-
-#ifdef CONFIG_CPU_HAS_LLSC
- /* Ok, this is the ll/sc case. World is sane :-) */
-1: ll v0, (a1)
- move a0, a2
-2: sc a0, (a1)
-#if R10000_LLSC_WAR
- beqzl a0, 1b
-#else
- beqz a0, 1b
-#endif
-
- .section __ex_table,"a"
- PTR 1b, bad_stack
- PTR 2b, bad_stack
- .previous
-#else
- sw a1, 16(sp)
- sw a2, 20(sp)
-
- move a0, sp
- move a2, a1
- li a1, 1
- jal do_page_fault
-
- lw a1, 16(sp)
- lw a2, 20(sp)
-
- /*
- * At this point the page should be readable and writable unless
- * there was no more memory available.
- */
-1: lw v0, (a1)
-2: sw a2, (a1)
-
- .section __ex_table,"a"
- PTR 1b, no_mem
- PTR 2b, no_mem
- .previous
-#endif
-
- sw zero, PT_R7(sp) # success
- sw v0, PT_R2(sp) # result
-
- j o32_syscall_exit # continue like a normal syscall
-
-no_mem: li v0, -ENOMEM
- jr ra
-
-bad_address:
- li v0, -EFAULT
- jr ra
-
-bad_alignment:
- li v0, -EINVAL
- jr ra
- END(mips_atomic_set)
-
- LEAF(sys_sysmips)
- beq a0, MIPS_ATOMIC_SET, mips_atomic_set
- j _sys_sysmips
- END(sys_sysmips)
-
LEAF(sys_syscall)
subu t0, a0, __NR_O32_Linux # check syscall number
sltiu v0, t0, __NR_O32_Linux_syscalls + 1
@@ -653,7 +581,7 @@ einval: li v0, -ENOSYS
sys sys_preadv 6 /* 4330 */
sys sys_pwritev 6
sys sys_rt_tgsigqueueinfo 4
- sys sys_perf_counter_open 5
+ sys sys_perf_event_open 5
sys sys_accept4 4
.endm
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 3d866f24e064..18bf7f32c5e4 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -124,78 +124,6 @@ illegal_syscall:
j n64_syscall_exit
END(handle_sys64)
- LEAF(mips_atomic_set)
- andi v0, a1, 3 # must be word aligned
- bnez v0, bad_alignment
-
- LONG_L v1, TI_ADDR_LIMIT($28) # in legal address range?
- LONG_ADDIU a0, a1, 4
- or a0, a0, a1
- and a0, a0, v1
- bltz a0, bad_address
-
-#ifdef CONFIG_CPU_HAS_LLSC
- /* Ok, this is the ll/sc case. World is sane :-) */
-1: ll v0, (a1)
- move a0, a2
-2: sc a0, (a1)
-#if R10000_LLSC_WAR
- beqzl a0, 1b
-#else
- beqz a0, 1b
-#endif
-
- .section __ex_table,"a"
- PTR 1b, bad_stack
- PTR 2b, bad_stack
- .previous
-#else
- sw a1, 16(sp)
- sw a2, 20(sp)
-
- move a0, sp
- move a2, a1
- li a1, 1
- jal do_page_fault
-
- lw a1, 16(sp)
- lw a2, 20(sp)
-
- /*
- * At this point the page should be readable and writable unless
- * there was no more memory available.
- */
-1: lw v0, (a1)
-2: sw a2, (a1)
-
- .section __ex_table,"a"
- PTR 1b, no_mem
- PTR 2b, no_mem
- .previous
-#endif
-
- sd zero, PT_R7(sp) # success
- sd v0, PT_R2(sp) # result
-
- j n64_syscall_exit # continue like a normal syscall
-
-no_mem: li v0, -ENOMEM
- jr ra
-
-bad_address:
- li v0, -EFAULT
- jr ra
-
-bad_alignment:
- li v0, -EINVAL
- jr ra
- END(mips_atomic_set)
-
- LEAF(sys_sysmips)
- beq a0, MIPS_ATOMIC_SET, mips_atomic_set
- j _sys_sysmips
- END(sys_sysmips)
-
.align 3
sys_call_table:
PTR sys_read /* 5000 */
@@ -490,6 +418,6 @@ sys_call_table:
PTR sys_preadv
PTR sys_pwritev /* 5390 */
PTR sys_rt_tgsigqueueinfo
- PTR sys_perf_counter_open
+ PTR sys_perf_event_open
PTR sys_accept4
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 1a6ae124635b..6ebc07976694 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -416,6 +416,6 @@ EXPORT(sysn32_call_table)
PTR sys_preadv
PTR sys_pwritev
PTR compat_sys_rt_tgsigqueueinfo /* 5295 */
- PTR sys_perf_counter_open
+ PTR sys_perf_event_open
PTR sys_accept4
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index cd31087a651f..9bbf9775e0bd 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -536,6 +536,6 @@ sys_call_table:
PTR compat_sys_preadv /* 4330 */
PTR compat_sys_pwritev
PTR compat_sys_rt_tgsigqueueinfo
- PTR sys_perf_counter_open
+ PTR sys_perf_event_open
PTR sys_accept4
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 2950b97253b7..2b290d70083e 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -441,7 +441,7 @@ static void __init bootmem_init(void)
* initialization hook for anything else was introduced.
*/
-static int usermem __initdata = 0;
+static int usermem __initdata;
static int __init early_parse_mem(char *p)
{
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index ad0ff5dc4d59..cc81771b882c 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -80,11 +80,11 @@ void cmp_send_ipi_single(int cpu, unsigned int action)
local_irq_restore(flags);
}
-static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
cmp_send_ipi_single(i, action);
}
@@ -171,7 +171,7 @@ void __init cmp_smp_setup(void)
for (i = 1; i < NR_CPUS; i++) {
if (amon_cpu_avail(i)) {
- cpu_set(i, cpu_possible_map);
+ set_cpu_possible(i, true);
__cpu_number_map[i] = ++ncpu;
__cpu_logical_map[ncpu] = i;
}
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 6f7ee5ac46ee..43e7cdc5ded2 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -70,7 +70,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
write_vpe_c0_vpeconf0(tmp);
/* Record this as available CPU */
- cpu_set(tc, cpu_possible_map);
+ set_cpu_possible(tc, true);
__cpu_number_map[tc] = ++ncpu;
__cpu_logical_map[ncpu] = tc;
}
@@ -141,11 +141,11 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
local_irq_restore(flags);
}
-static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
- for_each_cpu_mask(i, mask)
+ for_each_cpu(i, mask)
vsmp_send_ipi_single(i, action);
}
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c
index 2508d55d68fd..00500fea2750 100644
--- a/arch/mips/kernel/smp-up.c
+++ b/arch/mips/kernel/smp-up.c
@@ -18,7 +18,8 @@ static void up_send_ipi_single(int cpu, unsigned int action)
panic(KERN_ERR "%s called", __func__);
}
-static inline void up_send_ipi_mask(cpumask_t mask, unsigned int action)
+static inline void up_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
{
panic(KERN_ERR "%s called", __func__);
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index bc7d9b05e2f4..e72e6844d134 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -49,8 +49,6 @@ volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
-extern void cpu_idle(void);
-
/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);
@@ -129,19 +127,6 @@ asmlinkage __cpuinit void start_secondary(void)
cpu_idle();
}
-void arch_send_call_function_ipi(cpumask_t mask)
-{
- mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
-}
-
-/*
- * We reuse the same vector for the single IPI
- */
-void arch_send_call_function_single_ipi(int cpu)
-{
- mp_ops->send_ipi_mask(cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
-}
-
/*
* Call into both interrupt handlers, as we share the IPI for them
*/
@@ -184,15 +169,15 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
mp_ops->prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
- cpu_present_map = cpu_possible_map;
+ init_cpu_present(&cpu_possible_map);
#endif
}
/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
- cpu_set(0, cpu_possible_map);
- cpu_set(0, cpu_online_map);
+ set_cpu_possible(0, true);
+ set_cpu_online(0, true);
cpu_set(0, cpu_callin_map);
}
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index c16bb6d6c25c..4d181df44a40 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -95,14 +95,14 @@ void init_smtc_stats(void);
/* Global SMTC Status */
-unsigned int smtc_status = 0;
+unsigned int smtc_status;
/* Boot command line configuration overrides */
static int vpe0limit;
-static int ipibuffers = 0;
-static int nostlb = 0;
-static int asidmask = 0;
+static int ipibuffers;
+static int nostlb;
+static int asidmask;
unsigned long smtc_asid_mask = 0xff;
static int __init vpe0tcs(char *str)
@@ -151,7 +151,7 @@ __setup("asidmask=", asidmask_set);
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
-static int hang_trig = 0;
+static int hang_trig;
static int __init hangtrig_enable(char *s)
{
@@ -305,7 +305,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot)
*/
ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
- cpu_set(i, cpu_possible_map);
+ set_cpu_possible(i, true);
__cpu_number_map[i] = i;
__cpu_logical_map[i] = i;
}
@@ -525,8 +525,8 @@ void smtc_prepare_cpus(int cpus)
* Pull any physically present but unused TCs out of circulation.
*/
while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
- cpu_clear(tc, cpu_possible_map);
- cpu_clear(tc, cpu_present_map);
+ set_cpu_possible(tc, false);
+ set_cpu_present(tc, false);
tc++;
}
@@ -1098,9 +1098,8 @@ static void ipi_irq_dispatch(void)
static struct irqaction irq_ipi = {
.handler = ipi_interrupt,
- .flags = IRQF_DISABLED,
- .name = "SMTC_IPI",
- .flags = IRQF_PERCPU
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+ .name = "SMTC_IPI"
};
static void setup_cross_vpe_interrupts(unsigned int nvpe)
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 8cf384644040..3fe1fcfa2e73 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -28,7 +28,9 @@
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/ipc.h>
+#include <linux/uaccess.h>
+#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
@@ -290,12 +292,116 @@ SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
return 0;
}
-asmlinkage int _sys_sysmips(long cmd, long arg1, long arg2, long arg3)
+static inline int mips_atomic_set(struct pt_regs *regs,
+ unsigned long addr, unsigned long new)
{
+ unsigned long old, tmp;
+ unsigned int err;
+
+ if (unlikely(addr & 3))
+ return -EINVAL;
+
+ if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
+ return -EINVAL;
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ __asm__ __volatile__ (
+ " li %[err], 0 \n"
+ "1: ll %[old], (%[addr]) \n"
+ " move %[tmp], %[new] \n"
+ "2: sc %[tmp], (%[addr]) \n"
+ " beqzl %[tmp], 1b \n"
+ "3: \n"
+ " .section .fixup,\"ax\" \n"
+ "4: li %[err], %[efault] \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " "STR(PTR)" 1b, 4b \n"
+ " "STR(PTR)" 2b, 4b \n"
+ " .previous \n"
+ : [old] "=&r" (old),
+ [err] "=&r" (err),
+ [tmp] "=&r" (tmp)
+ : [addr] "r" (addr),
+ [new] "r" (new),
+ [efault] "i" (-EFAULT)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ __asm__ __volatile__ (
+ " li %[err], 0 \n"
+ "1: ll %[old], (%[addr]) \n"
+ " move %[tmp], %[new] \n"
+ "2: sc %[tmp], (%[addr]) \n"
+ " bnez %[tmp], 4f \n"
+ "3: \n"
+ " .subsection 2 \n"
+ "4: b 1b \n"
+ " .previous \n"
+ " \n"
+ " .section .fixup,\"ax\" \n"
+ "5: li %[err], %[efault] \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " "STR(PTR)" 1b, 5b \n"
+ " "STR(PTR)" 2b, 5b \n"
+ " .previous \n"
+ : [old] "=&r" (old),
+ [err] "=&r" (err),
+ [tmp] "=&r" (tmp)
+ : [addr] "r" (addr),
+ [new] "r" (new),
+ [efault] "i" (-EFAULT)
+ : "memory");
+ } else {
+ do {
+ preempt_disable();
+ ll_bit = 1;
+ ll_task = current;
+ preempt_enable();
+
+ err = __get_user(old, (unsigned int *) addr);
+ err |= __put_user(new, (unsigned int *) addr);
+ if (err)
+ break;
+ rmb();
+ } while (!ll_bit);
+ }
+
+ if (unlikely(err))
+ return err;
+
+ regs->regs[2] = old;
+ regs->regs[7] = 0; /* No error */
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ " move $29, %0 \n"
+ " j syscall_exit \n"
+ : /* no outputs */
+ : "r" (regs));
+
+ /* unreached. Honestly. */
+ while (1);
+}
+
+save_static_function(sys_sysmips);
+static int __used noinline
+_sys_sysmips(nabi_no_regargs struct pt_regs regs)
+{
+ long cmd, arg1, arg2, arg3;
+
+ cmd = regs.regs[4];
+ arg1 = regs.regs[5];
+ arg2 = regs.regs[6];
+ arg3 = regs.regs[7];
+
switch (cmd) {
case MIPS_ATOMIC_SET:
- printk(KERN_CRIT "How did I get here?\n");
- return -EINVAL;
+ return mips_atomic_set(&regs, arg1, arg2);
case MIPS_FIXADE:
if (arg1 & ~3)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 08f1edf355e8..0a18b4c62afb 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -466,9 +466,8 @@ asmlinkage void do_be(struct pt_regs *regs)
* The ll_bit is cleared by r*_switch.S
*/
-unsigned long ll_bit;
-
-static struct task_struct *ll_task = NULL;
+unsigned int ll_bit;
+struct task_struct *ll_task;
static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 58738c8d754f..162b29954baa 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -1,4 +1,5 @@
#include <asm/asm-offsets.h>
+#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#undef mips
@@ -9,7 +10,16 @@ PHDRS {
text PT_LOAD FLAGS(7); /* RWX */
note PT_NOTE FLAGS(4); /* R__ */
}
-jiffies = JIFFIES;
+
+#ifdef CONFIG_32BIT
+ #ifdef CONFIG_CPU_LITTLE_ENDIAN
+ jiffies = jiffies_64;
+ #else
+ jiffies = jiffies_64 + 4;
+ #endif
+#else
+ jiffies = jiffies_64;
+#endif
SECTIONS
{
@@ -28,7 +38,7 @@ SECTIONS
/* . = 0xa800000000300000; */
. = 0xffffffff80300000;
#endif
- . = LOADADDR;
+ . = VMLINUX_LOAD_ADDRESS;
/* read-only */
_text = .; /* Text and read-only data */
.text : {
@@ -42,13 +52,7 @@ SECTIONS
} :text = 0
_etext = .; /* End of text section */
- /* Exception table */
- . = ALIGN(16);
- __ex_table : {
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
- }
+ EXCEPTION_TABLE(16)
/* Exception table for data bus errors */
__dbe_table : {
@@ -65,20 +69,10 @@ SECTIONS
/* writeable */
.data : { /* Data */
. = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
- /*
- * This ALIGN is needed as a workaround for a bug a
- * gcc bug upto 4.1 which limits the maximum alignment
- * to at most 32kB and results in the following
- * warning:
- *
- * CC arch/mips/kernel/init_task.o
- * arch/mips/kernel/init_task.c:30: warning: alignment
- * of ‘init_thread_union’ is greater than maximum
- * object file alignment. Using 32768
- */
- . = ALIGN(_PAGE_SIZE);
- *(.data.init_task)
+ INIT_TASK_DATA(PAGE_SIZE)
+ NOSAVE_DATA
+ CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
DATA_DATA
CONSTRUCTORS
}
@@ -95,51 +89,13 @@ SECTIONS
.sdata : {
*(.sdata)
}
-
- . = ALIGN(_PAGE_SIZE);
- .data_nosave : {
- __nosave_begin = .;
- *(.data.nosave)
- }
- . = ALIGN(_PAGE_SIZE);
- __nosave_end = .;
-
- . = ALIGN(1 << CONFIG_MIPS_L1_CACHE_SHIFT);
- .data.cacheline_aligned : {
- *(.data.cacheline_aligned)
- }
_edata = .; /* End of data section */
/* will be freed after init */
- . = ALIGN(_PAGE_SIZE); /* Init code and data */
+ . = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
- .init.text : {
- _sinittext = .;
- INIT_TEXT
- _einittext = .;
- }
- .init.data : {
- INIT_DATA
- }
- . = ALIGN(16);
- .init.setup : {
- __setup_start = .;
- *(.init.setup)
- __setup_end = .;
- }
-
- .initcall.init : {
- __initcall_start = .;
- INITCALLS
- __initcall_end = .;
- }
-
- .con_initcall.init : {
- __con_initcall_start = .;
- *(.con_initcall.init)
- __con_initcall_end = .;
- }
- SECURITY_INIT
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
/* .exit.text is discarded at runtime, not link time, to deal with
* references from .rodata
@@ -150,43 +106,16 @@ SECTIONS
.exit.data : {
EXIT_DATA
}
-#if defined(CONFIG_BLK_DEV_INITRD)
- . = ALIGN(_PAGE_SIZE);
- .init.ramfs : {
- __initramfs_start = .;
- *(.init.ramfs)
- __initramfs_end = .;
- }
-#endif
- PERCPU(_PAGE_SIZE)
- . = ALIGN(_PAGE_SIZE);
+
+ PERCPU(PAGE_SIZE)
+ . = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
- __bss_start = .; /* BSS */
- .sbss : {
- *(.sbss)
- *(.scommon)
- }
- .bss : {
- *(.bss)
- *(COMMON)
- }
- __bss_stop = .;
+ BSS_SECTION(0, 0, 0)
_end = . ;
- /* Sections to be discarded */
- /DISCARD/ : {
- *(.exitcall.exit)
-
- /* ABI crap starts here */
- *(.MIPS.options)
- *(.options)
- *(.pdr)
- *(.reginfo)
- }
-
/* These mark the ABI of the kernel for debuggers. */
.mdebug.abi32 : {
KEEP(*(.mdebug.abi32))
@@ -212,4 +141,14 @@ SECTIONS
*(.gptab.bss)
*(.gptab.sbss)
}
+
+ /* Sections to be discarded */
+ DISCARDS
+ /DISCARD/ : {
+ /* ABI crap starts here */
+ *(.MIPS.options)
+ *(.options)
+ *(.pdr)
+ *(.reginfo)
+ }
}
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 9a1ab7e87fd4..03092ab2a296 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -74,7 +74,7 @@ static const int minor = 1; /* fixed for now */
#ifdef CONFIG_MIPS_APSP_KSPD
static struct kspd_notifications kspd_events;
-static int kspd_events_reqd = 0;
+static int kspd_events_reqd;
#endif
/* grab the likely amount of memory we will need. */
@@ -144,14 +144,15 @@ struct tc {
};
struct {
- /* Virtual processing elements */
- struct list_head vpe_list;
-
- /* Thread contexts */
- struct list_head tc_list;
+ spinlock_t vpe_list_lock;
+ struct list_head vpe_list; /* Virtual processing elements */
+ spinlock_t tc_list_lock;
+ struct list_head tc_list; /* Thread contexts */
} vpecontrol = {
- .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
- .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
+ .vpe_list_lock = SPIN_LOCK_UNLOCKED,
+ .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
+ .tc_list_lock = SPIN_LOCK_UNLOCKED,
+ .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
};
static void release_progmem(void *ptr);
@@ -159,28 +160,38 @@ static void release_progmem(void *ptr);
/* get the vpe associated with this minor */
static struct vpe *get_vpe(int minor)
{
- struct vpe *v;
+ struct vpe *res, *v;
if (!cpu_has_mipsmt)
return NULL;
+ res = NULL;
+ spin_lock(&vpecontrol.vpe_list_lock);
list_for_each_entry(v, &vpecontrol.vpe_list, list) {
- if (v->minor == minor)
- return v;
+ if (v->minor == minor) {
+ res = v;
+ break;
+ }
}
+ spin_unlock(&vpecontrol.vpe_list_lock);
- return NULL;
+ return res;
}
/* get the vpe associated with this minor */
static struct tc *get_tc(int index)
{
- struct tc *t;
+ struct tc *res, *t;
+ res = NULL;
+ spin_lock(&vpecontrol.tc_list_lock);
list_for_each_entry(t, &vpecontrol.tc_list, list) {
- if (t->index == index)
- return t;
+ if (t->index == index) {
+ res = t;
+ break;
+ }
}
+ spin_unlock(&vpecontrol.tc_list_lock);
return NULL;
}
@@ -190,15 +201,17 @@ static struct vpe *alloc_vpe(int minor)
{
struct vpe *v;
- if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) {
+ if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL)
return NULL;
- }
INIT_LIST_HEAD(&v->tc);
+ spin_lock(&vpecontrol.vpe_list_lock);
list_add_tail(&v->list, &vpecontrol.vpe_list);
+ spin_unlock(&vpecontrol.vpe_list_lock);
INIT_LIST_HEAD(&v->notify);
v->minor = minor;
+
return v;
}
@@ -212,7 +225,10 @@ static struct tc *alloc_tc(int index)
INIT_LIST_HEAD(&tc->tc);
tc->index = index;
+
+ spin_lock(&vpecontrol.tc_list_lock);
list_add_tail(&tc->list, &vpecontrol.tc_list);
+ spin_unlock(&vpecontrol.tc_list_lock);
out:
return tc;
@@ -227,7 +243,7 @@ static void release_vpe(struct vpe *v)
kfree(v);
}
-static void dump_mtregs(void)
+static void __maybe_unused dump_mtregs(void)
{
unsigned long val;
@@ -1048,20 +1064,19 @@ static int vpe_open(struct inode *inode, struct file *filp)
enum vpe_state state;
struct vpe_notifications *not;
struct vpe *v;
- int ret, err = 0;
+ int ret;
- lock_kernel();
if (minor != iminor(inode)) {
/* assume only 1 device at the moment. */
- printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
- err = -ENODEV;
- goto out;
+ pr_warning("VPE loader: only vpe1 is supported\n");
+
+ return -ENODEV;
}
if ((v = get_vpe(tclimit)) == NULL) {
- printk(KERN_WARNING "VPE loader: unable to get vpe\n");
- err = -ENODEV;
- goto out;
+ pr_warning("VPE loader: unable to get vpe\n");
+
+ return -ENODEV;
}
state = xchg(&v->state, VPE_STATE_INUSE);
@@ -1101,8 +1116,8 @@ static int vpe_open(struct inode *inode, struct file *filp)
v->shared_ptr = NULL;
v->__start = 0;
-out:
unlock_kernel();
+
return 0;
}
@@ -1594,14 +1609,14 @@ static void __exit vpe_module_exit(void)
{
struct vpe *v, *n;
+ device_del(&vpe_device);
+ unregister_chrdev(major, module_name);
+
+ /* No locking needed here */
list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
- if (v->state != VPE_STATE_UNUSED) {
+ if (v->state != VPE_STATE_UNUSED)
release_vpe(v);
- }
}
-
- device_del(&vpe_device);
- unregister_chrdev(major, module_name);
}
module_init(vpe_module_init);