Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/cpu-probe.c               | 81
-rw-r--r--  arch/mips/kernel/entry.S                   |  6
-rw-r--r--  arch/mips/kernel/mips-cm.c                 |  4
-rw-r--r--  arch/mips/kernel/mips-cpc.c                |  2
-rw-r--r--  arch/mips/kernel/setup.c                   | 11
-rw-r--r--  arch/mips/kernel/sync-r4k.c                |  5
-rw-r--r--  arch/mips/kernel/syscalls/Makefile         |  2
-rw-r--r--  arch/mips/kernel/syscalls/syscall_n32.tbl  |  2
-rw-r--r--  arch/mips/kernel/syscalls/syscall_n64.tbl  |  2
-rw-r--r--  arch/mips/kernel/syscalls/syscall_o32.tbl  |  2
-rw-r--r--  arch/mips/kernel/traps.c                   |  5
-rw-r--r--  arch/mips/kernel/unaligned.c               | 36
-rw-r--r--  arch/mips/kernel/vpe.c                     |  2
13 files changed, 92 insertions, 68 deletions
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index c54332697673..6ab6b03d35ba 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -102,7 +102,12 @@ static void cpu_set_fpu_2008(struct cpuinfo_mips *c)
if (fir & MIPS_FPIR_HAS2008) {
fcsr = read_32bit_cp1_register(CP1_STATUS);
- fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008);
+ /*
+ * The MAC2008 toolchain never landed in the real world, so we only
+ * test whether it can be disabled and don't try to enable it.
+ */
+ fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008 | FPU_CSR_MAC2008);
write_32bit_cp1_register(CP1_STATUS, fcsr0);
fcsr0 = read_32bit_cp1_register(CP1_STATUS);
@@ -112,6 +117,15 @@ static void cpu_set_fpu_2008(struct cpuinfo_mips *c)
write_32bit_cp1_register(CP1_STATUS, fcsr);
+ if (c->isa_level & (MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2)) {
+ /*
+ * The bit for MAC2008 might be reused by R6 in the future,
+ * so we only test for R2-R5.
+ */
+ if (fcsr0 & FPU_CSR_MAC2008)
+ c->options |= MIPS_CPU_MAC_2008_ONLY;
+ }
+
if (!(fcsr0 & FPU_CSR_NAN2008))
c->options |= MIPS_CPU_NAN_LEGACY;
if (fcsr1 & FPU_CSR_NAN2008)
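
The hunk above probes the FCSR 2008-mode bits by trying to flip them and reading back which ones actually changed. A minimal standalone sketch of that write-and-read-back pattern, reusing the CP1 helpers and FPU_CSR_* masks that appear in this hunk (a sketch only, not a drop-in replacement for cpu_set_fpu_2008()):

    /* Try to clear the 2008 bits; any bit still set afterwards is
     * hard-wired to 1 on this FPU. */
    fcsr = read_32bit_cp1_register(CP1_STATUS);
    fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008 | FPU_CSR_MAC2008);
    write_32bit_cp1_register(CP1_STATUS, fcsr0);
    fcsr0 = read_32bit_cp1_register(CP1_STATUS);

    /* Restore the original FCSR before drawing conclusions. */
    write_32bit_cp1_register(CP1_STATUS, fcsr);

    /* MAC2008 could not be cleared: the FPU only does 2008-style MAC. */
    if (fcsr0 & FPU_CSR_MAC2008)
        c->options |= MIPS_CPU_MAC_2008_ONLY;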
@@ -1960,10 +1974,8 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
BUG_ON(!__builtin_constant_p(cpu_has_counter) || cpu_has_counter);
switch (c->processor_id & PRID_IMP_MASK) {
- case PRID_IMP_XBURST:
- c->cputype = CPU_XBURST;
- c->writecombine = _CACHE_UNCACHED_ACCELERATED;
- __cpu_name[cpu] = "Ingenic JZRISC";
+ case PRID_IMP_XBURST_REV1:
+
/*
* The XBurst core by default attempts to avoid branch target
* buffer lookups by detecting & special casing loops. This
@@ -1971,34 +1983,43 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
* Set cp0 config7 bit 4 to disable this feature.
*/
set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
- break;
- default:
- panic("Unknown Ingenic Processor ID!");
- break;
- }
- switch (c->processor_id & PRID_COMP_MASK) {
- /*
- * The config0 register in the XBurst CPUs with a processor ID of
- * PRID_COMP_INGENIC_D1 has an abandoned huge page tlb mode, this
- * mode is not compatible with the MIPS standard, it will cause
- * tlbmiss and into an infinite loop (line 21 in the tlb-funcs.S)
- * when starting the init process. After chip reset, the default
- * is HPTLB mode, Write 0xa9000000 to cp0 register 5 sel 4 to
- * switch back to VTLB mode to prevent getting stuck.
- */
- case PRID_COMP_INGENIC_D1:
- write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS);
- break;
- /*
- * The config0 register in the XBurst CPUs with a processor ID of
- * PRID_COMP_INGENIC_D0 report themselves as MIPS32r2 compatible,
- * but they don't actually support this ISA.
- */
- case PRID_COMP_INGENIC_D0:
- c->isa_level &= ~MIPS_CPU_ISA_M32R2;
+ switch (c->processor_id & PRID_COMP_MASK) {
+
+ /*
+ * The config0 register in XBurst CPUs with a processor ID of
+ * PRID_COMP_INGENIC_D0 reports them as MIPS32r2 compatible,
+ * but they don't actually support this ISA.
+ */
+ case PRID_COMP_INGENIC_D0:
+ c->isa_level &= ~MIPS_CPU_ISA_M32R2;
+ break;
+
+ /*
+ * XBurst CPUs with a processor ID of PRID_COMP_INGENIC_D1 have an
+ * abandoned huge page TLB mode. This mode is not compatible with
+ * the MIPS standard: it makes a TLB miss loop forever (line 21 in
+ * tlb-funcs.S) when starting the init process. HPTLB mode is the
+ * default after chip reset, so write 0xa9000000 to cp0 register 5
+ * sel 4 to switch back to VTLB mode and avoid getting stuck.
+ */
+ case PRID_COMP_INGENIC_D1:
+ write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS);
+ break;
+
+ default:
+ break;
+ }
+ /* fall-through */
+ case PRID_IMP_XBURST_REV2:
+ c->cputype = CPU_XBURST;
+ c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ __cpu_name[cpu] = "Ingenic XBurst";
break;
+
default:
+ panic("Unknown Ingenic Processor ID!");
break;
}
}
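
Condensed, the restructured probe is a two-level decode of the processor ID: the implementation field selects the XBurst revision, with REV1 falling through into the common REV2 setup, and for REV1 the company field selects the per-die workaround. A sketch of the resulting control flow, using only the constants visible in this diff:

    switch (c->processor_id & PRID_IMP_MASK) {
    case PRID_IMP_XBURST_REV1:
        /* Disable BTB loop detection (cp0 config7 bit 4). */
        set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
        if ((c->processor_id & PRID_COMP_MASK) == PRID_COMP_INGENIC_D0)
            c->isa_level &= ~MIPS_CPU_ISA_M32R2;            /* not really MIPS32r2 */
        else if ((c->processor_id & PRID_COMP_MASK) == PRID_COMP_INGENIC_D1)
            write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS);  /* HPTLB -> VTLB */
        /* fall through to the common XBurst setup */
    case PRID_IMP_XBURST_REV2:
        c->cputype = CPU_XBURST;
        c->writecombine = _CACHE_UNCACHED_ACCELERATED;
        __cpu_name[cpu] = "Ingenic XBurst";
        break;
    default:
        panic("Unknown Ingenic Processor ID!");
    }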
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 5469d43b6966..4849a48afc0f 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -19,7 +19,7 @@
#include <asm/thread_info.h>
#include <asm/war.h>
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
#define resume_kernel restore_all
#else
#define __ret_from_irq ret_from_exception
@@ -27,7 +27,7 @@
.text
.align 5
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
FEXPORT(ret_from_exception)
local_irq_disable # preempt stop
b __ret_from_irq
@@ -53,7 +53,7 @@ resume_userspace:
bnez t0, work_pending
j restore_all
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
resume_kernel:
local_irq_disable
lw t0, TI_PRE_COUNT($28)
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index e5ea3db23d6b..cdb93ed91cde 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -194,7 +194,7 @@ static void mips_cm_probe_l2sync(void)
write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN);
/* Map the region */
- mips_cm_l2sync_base = ioremap_nocache(addr, MIPS_CM_L2SYNC_SIZE);
+ mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE);
}
int mips_cm_probe(void)
@@ -215,7 +215,7 @@ int mips_cm_probe(void)
if (!addr)
return -ENODEV;
- mips_gcr_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE);
+ mips_gcr_base = ioremap(addr, MIPS_CM_GCR_SIZE);
if (!mips_gcr_base)
return -ENXIO;
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 69e3e0b556bf..8d2535123f11 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -78,7 +78,7 @@ int mips_cpc_probe(void)
if (!addr)
return -ENODEV;
- mips_cpc_base = ioremap_nocache(addr, 0x8000);
+ mips_cpc_base = ioremap(addr, 0x8000);
if (!mips_cpc_base)
return -ENXIO;
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index c3d4212b5f1d..a7b469d89e2c 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -515,8 +515,7 @@ static void __init request_crashkernel(struct resource *res)
ret = request_resource(res, &crashk_res);
if (!ret)
pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
- (unsigned long)((crashk_res.end -
- crashk_res.start + 1) >> 20),
+ (unsigned long)(resource_size(&crashk_res) >> 20),
(unsigned long)(crashk_res.start >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
@@ -606,7 +605,8 @@ static void __init bootcmdline_init(char **cmdline_p)
* If we're configured to take boot arguments from DT, look for those
* now.
*/
- if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB))
+ if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
+ IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
#endif
@@ -698,8 +698,7 @@ static void __init arch_mem_init(char **cmdline_p)
mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
if (crashk_res.start != crashk_res.end)
- memblock_reserve(crashk_res.start,
- crashk_res.end - crashk_res.start + 1);
+ memblock_reserve(crashk_res.start, resource_size(&crashk_res));
#endif
device_tree_init();
sparse_init();
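
Both crashkernel hunks replace the open-coded "end - start + 1" arithmetic with the generic helper; resource_size() in <linux/ioport.h> is exactly that computation:

    static inline resource_size_t resource_size(const struct resource *res)
    {
        return res->end - res->start + 1;
    }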
@@ -796,8 +795,6 @@ void __init setup_arch(char **cmdline_p)
#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
conswitchp = &vga_con;
-#elif defined(CONFIG_DUMMY_CONSOLE)
- conswitchp = &dummy_con;
#endif
#endif
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index f2973ce87f53..abdd7aaa3311 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -90,6 +90,9 @@ void synchronise_count_master(int cpu)
void synchronise_count_slave(int cpu)
{
int i;
+ unsigned long flags;
+
+ local_irq_save(flags);
/*
* Not every cpu is online at the time this gets called,
@@ -113,5 +116,7 @@ void synchronise_count_slave(int cpu)
}
/* Arrange for an interrupt in a short while */
write_c0_compare(read_c0_count() + COUNTON);
+
+ local_irq_restore(flags);
}
#undef NR_LOOPS
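
The sync-r4k change brackets the whole slave-side calibration in the usual save/restore pair so the count/compare sequence cannot be skewed by an interrupt. The pattern being added is simply (calibration loop elided):

    unsigned long flags;

    local_irq_save(flags);
    /* ... synchronise the c0 count register with the master CPU ... */
    write_c0_compare(read_c0_count() + COUNTON);
    local_irq_restore(flags);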
diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile
index a3d4bec695c6..6efb2f6889a7 100644
--- a/arch/mips/kernel/syscalls/Makefile
+++ b/arch/mips/kernel/syscalls/Makefile
@@ -18,7 +18,7 @@ quiet_cmd_syshdr = SYSHDR $@
'$(syshdr_pfx_$(basetarget))' \
'$(syshdr_offset_$(basetarget))'
-quiet_cmd_sysnr = SYSNR $@
+quiet_cmd_sysnr = SYSNR   $@
cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \
'$(sysnr_abis_$(basetarget))' \
'$(sysnr_pfx_$(basetarget))' \
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index e7c5ab38e403..1f9e8ad636cc 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -374,3 +374,5 @@
433 n32 fspick sys_fspick
434 n32 pidfd_open sys_pidfd_open
435 n32 clone3 __sys_clone3
+437 n32 openat2 sys_openat2
+438 n32 pidfd_getfd sys_pidfd_getfd
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index 13cd66581f3b..c0b9d802dbf6 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -350,3 +350,5 @@
433 n64 fspick sys_fspick
434 n64 pidfd_open sys_pidfd_open
435 n64 clone3 __sys_clone3
+437 n64 openat2 sys_openat2
+438 n64 pidfd_getfd sys_pidfd_getfd
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 353539ea4140..ac586774c980 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -423,3 +423,5 @@
433 o32 fspick sys_fspick
434 o32 pidfd_open sys_pidfd_open
435 o32 clone3 __sys_clone3
+437 o32 openat2 sys_openat2
+438 o32 pidfd_getfd sys_pidfd_getfd
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 83f2a437d9e2..31968cbd6464 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -210,11 +210,6 @@ void show_stack(struct task_struct *task, unsigned long *sp)
regs.regs[29] = task->thread.reg29;
regs.regs[31] = 0;
regs.cp0_epc = task->thread.reg31;
-#ifdef CONFIG_KGDB_KDB
- } else if (atomic_read(&kgdb_active) != -1 &&
- kdb_current_regs) {
- memcpy(&regs, kdb_current_regs, sizeof(regs));
-#endif /* CONFIG_KGDB_KDB */
} else {
prepare_frametrace(&regs);
}
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 92bd2b0f0548..ca6fc4762d97 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -131,7 +131,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
#define _LoadW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -152,7 +152,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
/* For CPUs without lwl instruction */
#define _LoadW(addr, value, res, type) \
do { \
@@ -187,7 +187,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
#define _LoadHWU(addr, value, res, type) \
do { \
@@ -213,7 +213,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
#define _LoadWU(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -256,7 +256,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
/* For CPUs without lwl and ldl instructions */
#define _LoadWU(addr, value, res, type) \
do { \
@@ -340,7 +340,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
#define _StoreHW(addr, value, res, type) \
@@ -366,7 +366,7 @@ do { \
: "r" (value), "r" (addr), "i" (-EFAULT));\
} while(0)
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
#define _StoreW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -407,7 +407,7 @@ do { \
: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)
-#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
#define _StoreW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -483,7 +483,7 @@ do { \
: "memory"); \
} while(0)
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
#else /* __BIG_ENDIAN */
@@ -509,7 +509,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
#define _LoadW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -530,7 +530,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
/* For CPUs without lwl instruction */
#define _LoadW(addr, value, res, type) \
do { \
@@ -565,7 +565,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
#define _LoadHWU(addr, value, res, type) \
@@ -592,7 +592,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
#define _LoadWU(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -635,7 +635,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
/* For CPUs without lwl and ldl instructions */
#define _LoadWU(addr, value, res, type) \
do { \
@@ -718,7 +718,7 @@ do { \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
#define _StoreHW(addr, value, res, type) \
do { \
@@ -743,7 +743,7 @@ do { \
: "r" (value), "r" (addr), "i" (-EFAULT));\
} while(0)
-#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
+#ifndef CONFIG_CPU_NO_LOAD_STORE_LR
#define _StoreW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -784,7 +784,7 @@ do { \
: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)
-#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#else /* CONFIG_CPU_NO_LOAD_STORE_LR */
/* For CPUs without swl and sdl instructions */
#define _StoreW(addr, value, res, type) \
do { \
@@ -861,7 +861,7 @@ do { \
: "memory"); \
} while(0)
-#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+#endif /* CONFIG_CPU_NO_LOAD_STORE_LR */
#endif
#define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel)
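
The unaligned.c changes only invert the config symbol: the lwl/lwr based macros are now the default and are compiled out only when CONFIG_CPU_NO_LOAD_STORE_LR is set. For CPUs on that fallback path, an unaligned word load is assembled from individual byte accesses; roughly, in plain C for the big-endian case (the helper name is illustrative, and the real macros also carry exception-table fixups that are elided here):

    static inline unsigned int load_w_bytewise_be(const unsigned char *addr)
    {
        /* Build the 32-bit value byte by byte, most significant first. */
        return ((unsigned int)addr[0] << 24) |
               ((unsigned int)addr[1] << 16) |
               ((unsigned int)addr[2] << 8)  |
               (unsigned int)addr[3];
    }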
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 6176b9acba95..d0d832ab3d3b 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -134,7 +134,7 @@ void release_vpe(struct vpe *v)
{
list_del(&v->list);
if (v->load_addr)
- release_progmem(v);
+ release_progmem(v->load_addr);
kfree(v);
}