Diffstat (limited to 'arch/sh/kernel/cpu')
-rw-r--r--  arch/sh/kernel/cpu/Makefile                 1
-rw-r--r--  arch/sh/kernel/cpu/init.c                   2
-rw-r--r--  arch/sh/kernel/cpu/irq/Makefile             3
-rw-r--r--  arch/sh/kernel/cpu/irq/intc-sh5.c         194
-rw-r--r--  arch/sh/kernel/cpu/proc.c                   1
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7786.c      1
-rw-r--r--  arch/sh/kernel/cpu/sh5/Makefile            16
-rw-r--r--  arch/sh/kernel/cpu/sh5/clock-sh5.c         76
-rw-r--r--  arch/sh/kernel/cpu/sh5/entry.S           2000
-rw-r--r--  arch/sh/kernel/cpu/sh5/fpu.c              106
-rw-r--r--  arch/sh/kernel/cpu/sh5/probe.c             72
-rw-r--r--  arch/sh/kernel/cpu/sh5/setup-sh5.c        121
-rw-r--r--  arch/sh/kernel/cpu/sh5/switchto.S         195
-rw-r--r--  arch/sh/kernel/cpu/sh5/unwind.c           342
14 files changed, 3 insertions, 3127 deletions
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile
index f7c22ea98b0f..46118236bf04 100644
--- a/arch/sh/kernel/cpu/Makefile
+++ b/arch/sh/kernel/cpu/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_CPU_SH2) = sh2/
obj-$(CONFIG_CPU_SH2A) = sh2a/
obj-$(CONFIG_CPU_SH3) = sh3/
obj-$(CONFIG_CPU_SH4) = sh4/
-obj-$(CONFIG_CPU_SH5) = sh5/
# Special cases for family ancestry.
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index ce7291e12a30..1d008745877f 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -103,7 +103,7 @@ void __attribute__ ((weak)) l2_cache_init(void)
/*
* Generic first-level cache init
*/
-#if defined(CONFIG_SUPERH32) && !defined(CONFIG_CPU_J2)
+#if !defined(CONFIG_CPU_J2)
static void cache_init(void)
{
unsigned long ccr, flags;
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile
index 8b91cb96411b..e4578cde46ba 100644
--- a/arch/sh/kernel/cpu/irq/Makefile
+++ b/arch/sh/kernel/cpu/irq/Makefile
@@ -2,6 +2,5 @@
#
# Makefile for the Linux/SuperH CPU-specific IRQ handlers.
#
-obj-$(CONFIG_SUPERH32) += imask.o
-obj-$(CONFIG_CPU_SH5) += intc-sh5.o
+obj-y += imask.o
obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o
diff --git a/arch/sh/kernel/cpu/irq/intc-sh5.c b/arch/sh/kernel/cpu/irq/intc-sh5.c
deleted file mode 100644
index 1b3050facda8..000000000000
--- a/arch/sh/kernel/cpu/irq/intc-sh5.c
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/cpu/irq/intc-sh5.c
- *
- * Interrupt Controller support for SH5 INTC.
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 Paul Mundt
- *
- * Per-interrupt selective. IRLM=0 (Fixed priority) is not
- * supported being useless without a cascaded interrupt
- * controller.
- */
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <cpu/irq.h>
-#include <asm/page.h>
-
-/*
- * Maybe the generic Peripheral block could move to a more
- * generic include file. INTC Block will be defined here
- * and only here to make INTC self-contained in a single
- * file.
- */
-#define INTC_BLOCK_OFFSET 0x01000000
-
-/* Base */
-#define INTC_BASE PHYS_PERIPHERAL_BLOCK + \
- INTC_BLOCK_OFFSET
-
-/* Address */
-#define INTC_ICR_SET (intc_virt + 0x0)
-#define INTC_ICR_CLEAR (intc_virt + 0x8)
-#define INTC_INTPRI_0 (intc_virt + 0x10)
-#define INTC_INTSRC_0 (intc_virt + 0x50)
-#define INTC_INTSRC_1 (intc_virt + 0x58)
-#define INTC_INTREQ_0 (intc_virt + 0x60)
-#define INTC_INTREQ_1 (intc_virt + 0x68)
-#define INTC_INTENB_0 (intc_virt + 0x70)
-#define INTC_INTENB_1 (intc_virt + 0x78)
-#define INTC_INTDSB_0 (intc_virt + 0x80)
-#define INTC_INTDSB_1 (intc_virt + 0x88)
-
-#define INTC_ICR_IRLM 0x1
-#define INTC_INTPRI_PREGS 8 /* 8 Priority Registers */
-#define INTC_INTPRI_PPREG 8 /* 8 Priorities per Register */
-
-
-/*
- * Mapper between the vector ordinal and the IRQ number
- * passed to kernel/device drivers.
- */
-int intc_evt_to_irq[(0xE20/0x20)+1] = {
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0x000 - 0x0E0 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0x100 - 0x1E0 */
- 0, 0, 0, 0, 0, 1, 0, 0, /* 0x200 - 0x2E0 */
- 2, 0, 0, 3, 0, 0, 0, -1, /* 0x300 - 0x3E0 */
- 32, 33, 34, 35, 36, 37, 38, -1, /* 0x400 - 0x4E0 */
- -1, -1, -1, 63, -1, -1, -1, -1, /* 0x500 - 0x5E0 */
- -1, -1, 18, 19, 20, 21, 22, -1, /* 0x600 - 0x6E0 */
- 39, 40, 41, 42, -1, -1, -1, -1, /* 0x700 - 0x7E0 */
- 4, 5, 6, 7, -1, -1, -1, -1, /* 0x800 - 0x8E0 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0x900 - 0x9E0 */
- 12, 13, 14, 15, 16, 17, -1, -1, /* 0xA00 - 0xAE0 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0xB00 - 0xBE0 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0xC00 - 0xCE0 */
- -1, -1, -1, -1, -1, -1, -1, -1, /* 0xD00 - 0xDE0 */
- -1, -1 /* 0xE00 - 0xE20 */
-};
-
-static unsigned long intc_virt;
-static int irlm; /* IRL mode */
-
-static void enable_intc_irq(struct irq_data *data)
-{
- unsigned int irq = data->irq;
- unsigned long reg;
- unsigned long bitmask;
-
- if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
- printk("Trying to use straight IRL0-3 with an encoding platform.\n");
-
- if (irq < 32) {
- reg = INTC_INTENB_0;
- bitmask = 1 << irq;
- } else {
- reg = INTC_INTENB_1;
- bitmask = 1 << (irq - 32);
- }
-
- __raw_writel(bitmask, reg);
-}
-
-static void disable_intc_irq(struct irq_data *data)
-{
- unsigned int irq = data->irq;
- unsigned long reg;
- unsigned long bitmask;
-
- if (irq < 32) {
- reg = INTC_INTDSB_0;
- bitmask = 1 << irq;
- } else {
- reg = INTC_INTDSB_1;
- bitmask = 1 << (irq - 32);
- }
-
- __raw_writel(bitmask, reg);
-}
-
-static struct irq_chip intc_irq_type = {
- .name = "INTC",
- .irq_enable = enable_intc_irq,
- .irq_disable = disable_intc_irq,
-};
-
-void __init plat_irq_setup(void)
-{
- unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
- unsigned long reg;
- int i;
-
- intc_virt = (unsigned long)ioremap(INTC_BASE, 1024);
- if (!intc_virt) {
- panic("Unable to remap INTC\n");
- }
-
-
- /* Set default: per-line enable/disable, priority driven ack/eoi */
- for (i = 0; i < NR_INTC_IRQS; i++)
- irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq);
-
-
- /* Disable all interrupts and set all priorities to 0 to avoid trouble */
- __raw_writel(-1, INTC_INTDSB_0);
- __raw_writel(-1, INTC_INTDSB_1);
-
- for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
- __raw_writel( NO_PRIORITY, reg);
-
-
-#ifdef CONFIG_SH_CAYMAN
- {
- unsigned long data;
-
- /* Set IRLM */
- /* If all the priorities are set to 'no priority', then
- * assume we are using encoded mode.
- */
- irlm = platform_int_priority[IRQ_IRL0] +
- platform_int_priority[IRQ_IRL1] +
- platform_int_priority[IRQ_IRL2] +
- platform_int_priority[IRQ_IRL3];
- if (irlm == NO_PRIORITY) {
- /* IRLM = 0 */
- reg = INTC_ICR_CLEAR;
- i = IRQ_INTA;
- printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
- } else {
- /* IRLM = 1 */
- reg = INTC_ICR_SET;
- i = IRQ_IRL0;
- }
- __raw_writel(INTC_ICR_IRLM, reg);
-
- /* Set interrupt priorities according to platform description */
- for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
- data |= platform_int_priority[i] <<
- ((i % INTC_INTPRI_PPREG) * 4);
- if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
- /* Upon the 7th, set Priority Register */
- __raw_writel(data, reg);
- data = 0;
- reg += 8;
- }
- }
- }
-#endif
-
- /*
- * And now let interrupts come in.
- * sti() is not enough, we need to
- * lower priority, too.
- */
- __asm__ __volatile__("getcon " __SR ", %0\n\t"
- "and %0, %1, %0\n\t"
- "putcon %0, " __SR "\n\t"
- : "=&r" (__dummy0)
- : "r" (__dummy1));
-}
diff --git a/arch/sh/kernel/cpu/proc.c b/arch/sh/kernel/cpu/proc.c
index 85961b4f9c69..a306bcd6b341 100644
--- a/arch/sh/kernel/cpu/proc.c
+++ b/arch/sh/kernel/cpu/proc.c
@@ -24,7 +24,6 @@ static const char *cpu_name[] = {
[CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
[CPU_SH7786] = "SH7786", [CPU_SH7757] = "SH7757",
[CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
- [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
[CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723",
[CPU_SH7366] = "SH7366", [CPU_SH7724] = "SH7724",
[CPU_SH7372] = "SH7372", [CPU_SH7734] = "SH7734",
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
index 4b0db8259e3d..74620f30b19b 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c
@@ -391,6 +391,7 @@ static struct platform_device *sh7786_early_devices[] __initdata = {
&tmu0_device,
&tmu1_device,
&tmu2_device,
+ &tmu3_device,
};
static struct platform_device *sh7786_devices[] __initdata = {
diff --git a/arch/sh/kernel/cpu/sh5/Makefile b/arch/sh/kernel/cpu/sh5/Makefile
deleted file mode 100644
index 97d23ec3005f..000000000000
--- a/arch/sh/kernel/cpu/sh5/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the Linux/SuperH SH-5 backends.
-#
-obj-y := entry.o probe.o switchto.o
-
-obj-$(CONFIG_SH_FPU) += fpu.o
-obj-$(CONFIG_KALLSYMS) += unwind.o
-
-# CPU subtype setup
-obj-$(CONFIG_CPU_SH5) += setup-sh5.o
-
-# Primary on-chip clocks (common)
-clock-$(CONFIG_CPU_SH5) := clock-sh5.o
-
-obj-y += $(clock-y)
diff --git a/arch/sh/kernel/cpu/sh5/clock-sh5.c b/arch/sh/kernel/cpu/sh5/clock-sh5.c
deleted file mode 100644
index dee6be2c2344..000000000000
--- a/arch/sh/kernel/cpu/sh5/clock-sh5.c
+++ /dev/null
@@ -1,76 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/cpu/sh5/clock-sh5.c
- *
- * SH-5 support for the clock framework
- *
- * Copyright (C) 2008 Paul Mundt
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <asm/clock.h>
-#include <asm/io.h>
-
-static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
-
-/* Clock, Power and Reset Controller */
-#define CPRC_BLOCK_OFF 0x01010000
-#define CPRC_BASE (PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF)
-
-static unsigned long cprc_base;
-
-static void master_clk_init(struct clk *clk)
-{
- int idx = (__raw_readl(cprc_base + 0x00) >> 6) & 0x0007;
- clk->rate *= ifc_table[idx];
-}
-
-static struct sh_clk_ops sh5_master_clk_ops = {
- .init = master_clk_init,
-};
-
-static unsigned long module_clk_recalc(struct clk *clk)
-{
- int idx = (__raw_readw(cprc_base) >> 12) & 0x0007;
- return clk->parent->rate / ifc_table[idx];
-}
-
-static struct sh_clk_ops sh5_module_clk_ops = {
- .recalc = module_clk_recalc,
-};
-
-static unsigned long bus_clk_recalc(struct clk *clk)
-{
- int idx = (__raw_readw(cprc_base) >> 3) & 0x0007;
- return clk->parent->rate / ifc_table[idx];
-}
-
-static struct sh_clk_ops sh5_bus_clk_ops = {
- .recalc = bus_clk_recalc,
-};
-
-static unsigned long cpu_clk_recalc(struct clk *clk)
-{
- int idx = (__raw_readw(cprc_base) & 0x0007);
- return clk->parent->rate / ifc_table[idx];
-}
-
-static struct sh_clk_ops sh5_cpu_clk_ops = {
- .recalc = cpu_clk_recalc,
-};
-
-static struct sh_clk_ops *sh5_clk_ops[] = {
- &sh5_master_clk_ops,
- &sh5_module_clk_ops,
- &sh5_bus_clk_ops,
- &sh5_cpu_clk_ops,
-};
-
-void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
-{
- cprc_base = (unsigned long)ioremap(CPRC_BASE, 1024);
- BUG_ON(!cprc_base);
-
- if (idx < ARRAY_SIZE(sh5_clk_ops))
- *ops = sh5_clk_ops[idx];
-}
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
deleted file mode 100644
index 81c8b64b977f..000000000000
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ /dev/null
@@ -1,2000 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * arch/sh/kernel/cpu/sh5/entry.S
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2004 - 2008 Paul Mundt
- * Copyright (C) 2003, 2004 Richard Curnow
- */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/sys.h>
-#include <cpu/registers.h>
-#include <asm/processor.h>
-#include <asm/unistd.h>
-#include <asm/thread_info.h>
-#include <asm/asm-offsets.h>
-
-/*
- * SR fields.
- */
-#define SR_ASID_MASK 0x00ff0000
-#define SR_FD_MASK 0x00008000
-#define SR_SS 0x08000000
-#define SR_BL 0x10000000
-#define SR_MD 0x40000000
-
-/*
- * Event code.
- */
-#define EVENT_INTERRUPT 0
-#define EVENT_FAULT_TLB 1
-#define EVENT_FAULT_NOT_TLB 2
-#define EVENT_DEBUG 3
-
-/* EXPEVT values */
-#define RESET_CAUSE 0x20
-#define DEBUGSS_CAUSE 0x980
-
-/*
- * Frame layout. Quad index.
- */
-#define FRAME_T(x) FRAME_TBASE+(x*8)
-#define FRAME_R(x) FRAME_RBASE+(x*8)
-#define FRAME_S(x) FRAME_SBASE+(x*8)
-#define FSPC 0
-#define FSSR 1
-#define FSYSCALL_ID 2
-
-/* Arrange the save frame to be a multiple of 32 bytes long */
-#define FRAME_SBASE 0
-#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
-#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
-#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
-#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
-
-#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
-#define FP_FRAME_BASE 0
-
-#define SAVED_R2 0*8
-#define SAVED_R3 1*8
-#define SAVED_R4 2*8
-#define SAVED_R5 3*8
-#define SAVED_R18 4*8
-#define SAVED_R6 5*8
-#define SAVED_TR0 6*8
-
-/* These are the registers saved in the TLB path that aren't saved in the first
- level of the normal one. */
-#define TLB_SAVED_R25 7*8
-#define TLB_SAVED_TR1 8*8
-#define TLB_SAVED_TR2 9*8
-#define TLB_SAVED_TR3 10*8
-#define TLB_SAVED_TR4 11*8
-/* Save R0/R1 : PT-migrating compiler currently dishounours -ffixed-r0 and -ffixed-r1 causing
- breakage otherwise. */
-#define TLB_SAVED_R0 12*8
-#define TLB_SAVED_R1 13*8
-
-#define CLI() \
- getcon SR, r6; \
- ori r6, 0xf0, r6; \
- putcon r6, SR;
-
-#define STI() \
- getcon SR, r6; \
- andi r6, ~0xf0, r6; \
- putcon r6, SR;
-
-#ifdef CONFIG_PREEMPTION
-# define preempt_stop() CLI()
-#else
-# define preempt_stop()
-# define resume_kernel restore_all
-#endif
-
- .section .data, "aw"
-
-#define FAST_TLBMISS_STACK_CACHELINES 4
-#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
-
-/* Register back-up area for all exceptions */
- .balign 32
- /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
- * register saves etc. */
- .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
-/* This is 32 byte aligned by construction */
-/* Register back-up area for all exceptions */
-reg_save_area:
- .quad 0
- .quad 0
- .quad 0
- .quad 0
-
- .quad 0
- .quad 0
- .quad 0
- .quad 0
-
- .quad 0
- .quad 0
- .quad 0
- .quad 0
-
- .quad 0
- .quad 0
-
-/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
- * reentrancy. Note this area may be accessed via physical address.
- * Align so this fits a whole single cache line, for ease of purging.
- */
- .balign 32,0,32
-resvec_save_area:
- .quad 0
- .quad 0
- .quad 0
- .quad 0
- .quad 0
- .balign 32,0,32
-
-/* Jump table of 3rd level handlers */
-trap_jtable:
- .long do_exception_error /* 0x000 */
- .long do_exception_error /* 0x020 */
-#ifdef CONFIG_MMU
- .long tlb_miss_load /* 0x040 */
- .long tlb_miss_store /* 0x060 */
-#else
- .long do_exception_error
- .long do_exception_error
-#endif
- ! ARTIFICIAL pseudo-EXPEVT setting
- .long do_debug_interrupt /* 0x080 */
-#ifdef CONFIG_MMU
- .long tlb_miss_load /* 0x0A0 */
- .long tlb_miss_store /* 0x0C0 */
-#else
- .long do_exception_error
- .long do_exception_error
-#endif
- .long do_address_error_load /* 0x0E0 */
- .long do_address_error_store /* 0x100 */
-#ifdef CONFIG_SH_FPU
- .long do_fpu_error /* 0x120 */
-#else
- .long do_exception_error /* 0x120 */
-#endif
- .long do_exception_error /* 0x140 */
- .long system_call /* 0x160 */
- .long do_reserved_inst /* 0x180 */
- .long do_illegal_slot_inst /* 0x1A0 */
- .long do_exception_error /* 0x1C0 - NMI */
- .long do_exception_error /* 0x1E0 */
- .rept 15
- .long do_IRQ /* 0x200 - 0x3C0 */
- .endr
- .long do_exception_error /* 0x3E0 */
- .rept 32
- .long do_IRQ /* 0x400 - 0x7E0 */
- .endr
- .long fpu_error_or_IRQA /* 0x800 */
- .long fpu_error_or_IRQB /* 0x820 */
- .long do_IRQ /* 0x840 */
- .long do_IRQ /* 0x860 */
- .rept 6
- .long do_exception_error /* 0x880 - 0x920 */
- .endr
- .long breakpoint_trap_handler /* 0x940 */
- .long do_exception_error /* 0x960 */
- .long do_single_step /* 0x980 */
-
- .rept 3
- .long do_exception_error /* 0x9A0 - 0x9E0 */
- .endr
- .long do_IRQ /* 0xA00 */
- .long do_IRQ /* 0xA20 */
-#ifdef CONFIG_MMU
- .long itlb_miss_or_IRQ /* 0xA40 */
-#else
- .long do_IRQ
-#endif
- .long do_IRQ /* 0xA60 */
- .long do_IRQ /* 0xA80 */
-#ifdef CONFIG_MMU
- .long itlb_miss_or_IRQ /* 0xAA0 */
-#else
- .long do_IRQ
-#endif
- .long do_exception_error /* 0xAC0 */
- .long do_address_error_exec /* 0xAE0 */
- .rept 8
- .long do_exception_error /* 0xB00 - 0xBE0 */
- .endr
- .rept 18
- .long do_IRQ /* 0xC00 - 0xE20 */
- .endr
-
- .section .text64, "ax"
-
-/*
- * --- Exception/Interrupt/Event Handling Section
- */
-
-/*
- * VBR and RESVEC blocks.
- *
- * First level handler for VBR-based exceptions.
- *
- * To avoid waste of space, align to the maximum text block size.
- * This is assumed to be at most 128 bytes or 32 instructions.
- * DO NOT EXCEED 32 instructions on the first level handlers !
- *
- * Also note that RESVEC is contained within the VBR block
- * where the room left (1KB - TEXT_SIZE) allows placing
- * the RESVEC block (at most 512B + TEXT_SIZE).
- *
- * So first (and only) level handler for RESVEC-based exceptions.
- *
- * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
- * and interrupt) we are a lot tight with register space until
- * saving onto the stack frame, which is done in handle_exception().
- *
- */
-
-#define TEXT_SIZE 128
-#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
-
- .balign TEXT_SIZE
-LVBR_block:
- .space 256, 0 /* Power-on class handler, */
- /* not required here */
-not_a_tlb_miss:
- synco /* TAKum03020 (but probably a good idea anyway.) */
- /* Save original stack pointer into KCR1 */
- putcon SP, KCR1
-
- /* Save other original registers into reg_save_area */
- movi reg_save_area, SP
- st.q SP, SAVED_R2, r2
- st.q SP, SAVED_R3, r3
- st.q SP, SAVED_R4, r4
- st.q SP, SAVED_R5, r5
- st.q SP, SAVED_R6, r6
- st.q SP, SAVED_R18, r18
- gettr tr0, r3
- st.q SP, SAVED_TR0, r3
-
- /* Set args for Non-debug, Not a TLB miss class handler */
- getcon EXPEVT, r2
- movi ret_from_exception, r3
- ori r3, 1, r3
- movi EVENT_FAULT_NOT_TLB, r4
- or SP, ZERO, r5
- getcon KCR1, SP
- pta handle_exception, tr0
- blink tr0, ZERO
-
- .balign 256
- ! VBR+0x200
- nop
- .balign 256
- ! VBR+0x300
- nop
- .balign 256
- /*
- * Instead of the natural .balign 1024 place RESVEC here
- * respecting the final 1KB alignment.
- */
- .balign TEXT_SIZE
- /*
- * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
- * block making sure the final alignment is correct.
- */
-#ifdef CONFIG_MMU
-tlb_miss:
- synco /* TAKum03020 (but probably a good idea anyway.) */
- putcon SP, KCR1
- movi reg_save_area, SP
- /* SP is guaranteed 32-byte aligned. */
- st.q SP, TLB_SAVED_R0 , r0
- st.q SP, TLB_SAVED_R1 , r1
- st.q SP, SAVED_R2 , r2
- st.q SP, SAVED_R3 , r3
- st.q SP, SAVED_R4 , r4
- st.q SP, SAVED_R5 , r5
- st.q SP, SAVED_R6 , r6
- st.q SP, SAVED_R18, r18
-
- /* Save R25 for safety; as/ld may want to use it to achieve the call to
- * the code in mm/tlbmiss.c */
- st.q SP, TLB_SAVED_R25, r25
- gettr tr0, r2
- gettr tr1, r3
- gettr tr2, r4
- gettr tr3, r5
- gettr tr4, r18
- st.q SP, SAVED_TR0 , r2
- st.q SP, TLB_SAVED_TR1 , r3
- st.q SP, TLB_SAVED_TR2 , r4
- st.q SP, TLB_SAVED_TR3 , r5
- st.q SP, TLB_SAVED_TR4 , r18
-
- pt do_fast_page_fault, tr0
- getcon SSR, r2
- getcon EXPEVT, r3
- getcon TEA, r4
- shlri r2, 30, r2
- andi r2, 1, r2 /* r2 = SSR.MD */
- blink tr0, LINK
-
- pt fixup_to_invoke_general_handler, tr1
-
- /* If the fast path handler fixed the fault, just drop through quickly
- to the restore code right away to return to the excepting context.
- */
- bnei/u r2, 0, tr1
-
-fast_tlb_miss_restore:
- ld.q SP, SAVED_TR0, r2
- ld.q SP, TLB_SAVED_TR1, r3
- ld.q SP, TLB_SAVED_TR2, r4
-
- ld.q SP, TLB_SAVED_TR3, r5
- ld.q SP, TLB_SAVED_TR4, r18
-
- ptabs r2, tr0
- ptabs r3, tr1
- ptabs r4, tr2
- ptabs r5, tr3
- ptabs r18, tr4
-
- ld.q SP, TLB_SAVED_R0, r0
- ld.q SP, TLB_SAVED_R1, r1
- ld.q SP, SAVED_R2, r2
- ld.q SP, SAVED_R3, r3
- ld.q SP, SAVED_R4, r4
- ld.q SP, SAVED_R5, r5
- ld.q SP, SAVED_R6, r6
- ld.q SP, SAVED_R18, r18
- ld.q SP, TLB_SAVED_R25, r25
-
- getcon KCR1, SP
- rte
- nop /* for safety, in case the code is run on sh5-101 cut1.x */
-
-fixup_to_invoke_general_handler:
-
- /* OK, new method. Restore stuff that's not expected to get saved into
- the 'first-level' reg save area, then just fall through to setting
- up the registers and calling the second-level handler. */
-
- /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
- r25,tr1-4 and save r6 to get into the right state. */
-
- ld.q SP, TLB_SAVED_TR1, r3
- ld.q SP, TLB_SAVED_TR2, r4
- ld.q SP, TLB_SAVED_TR3, r5
- ld.q SP, TLB_SAVED_TR4, r18
- ld.q SP, TLB_SAVED_R25, r25
-
- ld.q SP, TLB_SAVED_R0, r0
- ld.q SP, TLB_SAVED_R1, r1
-
- ptabs/u r3, tr1
- ptabs/u r4, tr2
- ptabs/u r5, tr3
- ptabs/u r18, tr4
-
- /* Set args for Non-debug, TLB miss class handler */
- getcon EXPEVT, r2
- movi ret_from_exception, r3
- ori r3, 1, r3
- movi EVENT_FAULT_TLB, r4
- or SP, ZERO, r5
- getcon KCR1, SP
- pta handle_exception, tr0
- blink tr0, ZERO
-#else /* CONFIG_MMU */
- .balign 256
-#endif
-
-/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
- DOES END UP AT VBR+0x600 */
- nop
- nop
- nop
- nop
- nop
- nop
-
- .balign 256
- /* VBR + 0x600 */
-
-interrupt:
- synco /* TAKum03020 (but probably a good idea anyway.) */
- /* Save original stack pointer into KCR1 */
- putcon SP, KCR1
-
- /* Save other original registers into reg_save_area */
- movi reg_save_area, SP
- st.q SP, SAVED_R2, r2
- st.q SP, SAVED_R3, r3
- st.q SP, SAVED_R4, r4
- st.q SP, SAVED_R5, r5
- st.q SP, SAVED_R6, r6
- st.q SP, SAVED_R18, r18
- gettr tr0, r3
- st.q SP, SAVED_TR0, r3
-
- /* Set args for interrupt class handler */
- getcon INTEVT, r2
- movi ret_from_irq, r3
- ori r3, 1, r3
- movi EVENT_INTERRUPT, r4
- or SP, ZERO, r5
- getcon KCR1, SP
- pta handle_exception, tr0
- blink tr0, ZERO
- .balign TEXT_SIZE /* let's waste the bare minimum */
-
-LVBR_block_end: /* Marker. Used for total checking */
-
- .balign 256
-LRESVEC_block:
- /* Panic handler. Called with MMU off. Possible causes/actions:
- * - Reset: Jump to program start.
- * - Single Step: Turn off Single Step & return.
- * - Others: Call panic handler, passing PC as arg.
- * (this may need to be extended...)
- */
-reset_or_panic:
- synco /* TAKum03020 (but probably a good idea anyway.) */
- putcon SP, DCR
- /* First save r0-1 and tr0, as we need to use these */
- movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
- st.q SP, 0, r0
- st.q SP, 8, r1
- gettr tr0, r0
- st.q SP, 32, r0
-
- /* Check cause */
- getcon EXPEVT, r0
- movi RESET_CAUSE, r1
- sub r1, r0, r1 /* r1=0 if reset */
- movi _stext-CONFIG_PAGE_OFFSET, r0
- ori r0, 1, r0
- ptabs r0, tr0
- beqi r1, 0, tr0 /* Jump to start address if reset */
-
- getcon EXPEVT, r0
- movi DEBUGSS_CAUSE, r1
- sub r1, r0, r1 /* r1=0 if single step */
- pta single_step_panic, tr0
- beqi r1, 0, tr0 /* jump if single step */
-
- /* Now jump to where we save the registers. */
- movi panic_stash_regs-CONFIG_PAGE_OFFSET, r1
- ptabs r1, tr0
- blink tr0, r63
-
-single_step_panic:
- /* We are in a handler with Single Step set. We need to resume the
- * handler, by turning on MMU & turning off Single Step. */
- getcon SSR, r0
- movi SR_MMU, r1
- or r0, r1, r0
- movi ~SR_SS, r1
- and r0, r1, r0
- putcon r0, SSR
- /* Restore EXPEVT, as the rte won't do this */
- getcon PEXPEVT, r0
- putcon r0, EXPEVT
- /* Restore regs */
- ld.q SP, 32, r0
- ptabs r0, tr0
- ld.q SP, 0, r0
- ld.q SP, 8, r1
- getcon DCR, SP
- synco
- rte
-
-
- .balign 256
-debug_exception:
- synco /* TAKum03020 (but probably a good idea anyway.) */
- /*
- * Single step/software_break_point first level handler.
- * Called with MMU off, so the first thing we do is enable it
- * by doing an rte with appropriate SSR.
- */
- putcon SP, DCR
- /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
- movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
-
- /* With the MMU off, we are bypassing the cache, so purge any
- * data that will be made stale by the following stores.
- */
- ocbp SP, 0
- synco
-
- st.q SP, 0, r0
- st.q SP, 8, r1
- getcon SPC, r0
- st.q SP, 16, r0
- getcon SSR, r0
- st.q SP, 24, r0
-
- /* Enable MMU, block exceptions, set priv mode, disable single step */
- movi SR_MMU | SR_BL | SR_MD, r1
- or r0, r1, r0
- movi ~SR_SS, r1
- and r0, r1, r0
- putcon r0, SSR
- /* Force control to debug_exception_2 when rte is executed */
- movi debug_exeception_2, r0
- ori r0, 1, r0 /* force SHmedia, just in case */
- putcon r0, SPC
- getcon DCR, SP
- synco
- rte
-debug_exeception_2:
- /* Restore saved regs */
- putcon SP, KCR1
- movi resvec_save_area, SP
- ld.q SP, 24, r0
- putcon r0, SSR
- ld.q SP, 16, r0
- putcon r0, SPC
- ld.q SP, 0, r0
- ld.q SP, 8, r1
-
- /* Save other original registers into reg_save_area */
- movi reg_save_area, SP
- st.q SP, SAVED_R2, r2
- st.q SP, SAVED_R3, r3
- st.q SP, SAVED_R4, r4
- st.q SP, SAVED_R5, r5
- st.q SP, SAVED_R6, r6
- st.q SP, SAVED_R18, r18
- gettr tr0, r3
- st.q SP, SAVED_TR0, r3
-
- /* Set args for debug class handler */
- getcon EXPEVT, r2
- movi ret_from_exception, r3
- ori r3, 1, r3
- movi EVENT_DEBUG, r4
- or SP, ZERO, r5
- getcon KCR1, SP
- pta handle_exception, tr0
- blink tr0, ZERO
-
- .balign 256
-debug_interrupt:
- /* !!! WE COME HERE IN REAL MODE !!! */
- /* Hook-up debug interrupt to allow various debugging options to be
- * hooked into its handler. */
- /* Save original stack pointer into KCR1 */
- synco
- putcon SP, KCR1
- movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
- ocbp SP, 0
- ocbp SP, 32
- synco
-
- /* Save other original registers into reg_save_area thru real addresses */
- st.q SP, SAVED_R2, r2
- st.q SP, SAVED_R3, r3
- st.q SP, SAVED_R4, r4
- st.q SP, SAVED_R5, r5
- st.q SP, SAVED_R6, r6
- st.q SP, SAVED_R18, r18
- gettr tr0, r3
- st.q SP, SAVED_TR0, r3
-
- /* move (spc,ssr)->(pspc,pssr). The rte will shift
- them back again, so that they look like the originals
- as far as the real handler code is concerned. */
- getcon spc, r6
- putcon r6, pspc
- getcon ssr, r6
- putcon r6, pssr
-
- ! construct useful SR for handle_exception
- movi 3, r6
- shlli r6, 30, r6
- getcon sr, r18
- or r18, r6, r6
- putcon r6, ssr
-
- ! SSR is now the current SR with the MD and MMU bits set
- ! i.e. the rte will switch back to priv mode and put
- ! the mmu back on
-
- ! construct spc
- movi handle_exception, r18
- ori r18, 1, r18 ! for safety (do we need this?)
- putcon r18, spc
-
- /* Set args for Non-debug, Not a TLB miss class handler */
-
- ! EXPEVT==0x80 is unused, so 'steal' this value to put the
- ! debug interrupt handler in the vectoring table
- movi 0x80, r2
- movi ret_from_exception, r3
- ori r3, 1, r3
- movi EVENT_FAULT_NOT_TLB, r4
-
- or SP, ZERO, r5
- movi CONFIG_PAGE_OFFSET, r6
- add r6, r5, r5
- getcon KCR1, SP
-
- synco ! for safety
- rte ! -> handle_exception, switch back to priv mode again
-
-LRESVEC_block_end: /* Marker. Unused. */
-
- .balign TEXT_SIZE
-
-/*
- * Second level handler for VBR-based exceptions. Pre-handler.
- * In common to all stack-frame sensitive handlers.
- *
- * Inputs:
- * (KCR0) Current [current task union]
- * (KCR1) Original SP
- * (r2) INTEVT/EXPEVT
- * (r3) appropriate return address
- * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
- * (r5) Pointer to reg_save_area
- * (SP) Original SP
- *
- * Available registers:
- * (r6)
- * (r18)
- * (tr0)
- *
- */
-handle_exception:
- /* Common 2nd level handler. */
-
- /* First thing we need an appropriate stack pointer */
- getcon SSR, r6
- shlri r6, 30, r6
- andi r6, 1, r6
- pta stack_ok, tr0
- bne r6, ZERO, tr0 /* Original stack pointer is fine */
-
- /* Set stack pointer for user fault */
- getcon KCR0, SP
- movi THREAD_SIZE, r6 /* Point to the end */
- add SP, r6, SP
-
-stack_ok:
-
-/* DEBUG : check for underflow/overflow of the kernel stack */
- pta no_underflow, tr0
- getcon KCR0, r6
- movi 1024, r18
- add r6, r18, r6
- bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone
-
-/* Just panic to cause a crash. */
-bad_sp:
- ld.b r63, 0, r6
- nop
-
-no_underflow:
- pta bad_sp, tr0
- getcon kcr0, r6
- movi THREAD_SIZE, r18
- add r18, r6, r6
- bgt SP, r6, tr0 ! sp above the stack
-
- /* Make some room for the BASIC frame. */
- movi -(FRAME_SIZE), r6
- add SP, r6, SP
-
-/* Could do this with no stalling if we had another spare register, but the
- code below will be OK. */
- ld.q r5, SAVED_R2, r6
- ld.q r5, SAVED_R3, r18
- st.q SP, FRAME_R(2), r6
- ld.q r5, SAVED_R4, r6
- st.q SP, FRAME_R(3), r18
- ld.q r5, SAVED_R5, r18
- st.q SP, FRAME_R(4), r6
- ld.q r5, SAVED_R6, r6
- st.q SP, FRAME_R(5), r18
- ld.q r5, SAVED_R18, r18
- st.q SP, FRAME_R(6), r6
- ld.q r5, SAVED_TR0, r6
- st.q SP, FRAME_R(18), r18
- st.q SP, FRAME_T(0), r6
-
- /* Keep old SP around */
- getcon KCR1, r6
-
- /* Save the rest of the general purpose registers */
- st.q SP, FRAME_R(0), r0
- st.q SP, FRAME_R(1), r1
- st.q SP, FRAME_R(7), r7
- st.q SP, FRAME_R(8), r8
- st.q SP, FRAME_R(9), r9
- st.q SP, FRAME_R(10), r10
- st.q SP, FRAME_R(11), r11
- st.q SP, FRAME_R(12), r12
- st.q SP, FRAME_R(13), r13
- st.q SP, FRAME_R(14), r14
-
- /* SP is somewhere else */
- st.q SP, FRAME_R(15), r6
-
- st.q SP, FRAME_R(16), r16
- st.q SP, FRAME_R(17), r17
- /* r18 is saved earlier. */
- st.q SP, FRAME_R(19), r19
- st.q SP, FRAME_R(20), r20
- st.q SP, FRAME_R(21), r21
- st.q SP, FRAME_R(22), r22
- st.q SP, FRAME_R(23), r23
- st.q SP, FRAME_R(24), r24
- st.q SP, FRAME_R(25), r25
- st.q SP, FRAME_R(26), r26
- st.q SP, FRAME_R(27), r27
- st.q SP, FRAME_R(28), r28
- st.q SP, FRAME_R(29), r29
- st.q SP, FRAME_R(30), r30
- st.q SP, FRAME_R(31), r31
- st.q SP, FRAME_R(32), r32
- st.q SP, FRAME_R(33), r33
- st.q SP, FRAME_R(34), r34
- st.q SP, FRAME_R(35), r35
- st.q SP, FRAME_R(36), r36
- st.q SP, FRAME_R(37), r37
- st.q SP, FRAME_R(38), r38
- st.q SP, FRAME_R(39), r39
- st.q SP, FRAME_R(40), r40
- st.q SP, FRAME_R(41), r41
- st.q SP, FRAME_R(42), r42
- st.q SP, FRAME_R(43), r43
- st.q SP, FRAME_R(44), r44
- st.q SP, FRAME_R(45), r45
- st.q SP, FRAME_R(46), r46
- st.q SP, FRAME_R(47), r47
- st.q SP, FRAME_R(48), r48
- st.q SP, FRAME_R(49), r49
- st.q SP, FRAME_R(50), r50
- st.q SP, FRAME_R(51), r51
- st.q SP, FRAME_R(52), r52
- st.q SP, FRAME_R(53), r53
- st.q SP, FRAME_R(54), r54
- st.q SP, FRAME_R(55), r55
- st.q SP, FRAME_R(56), r56
- st.q SP, FRAME_R(57), r57
- st.q SP, FRAME_R(58), r58
- st.q SP, FRAME_R(59), r59
- st.q SP, FRAME_R(60), r60
- st.q SP, FRAME_R(61), r61
- st.q SP, FRAME_R(62), r62
-
- /*
- * Save the S* registers.
- */
- getcon SSR, r61
- st.q SP, FRAME_S(FSSR), r61
- getcon SPC, r62
- st.q SP, FRAME_S(FSPC), r62
- movi -1, r62 /* Reset syscall_nr */
- st.q SP, FRAME_S(FSYSCALL_ID), r62
-
- /* Save the rest of the target registers */
- gettr tr1, r6
- st.q SP, FRAME_T(1), r6
- gettr tr2, r6
- st.q SP, FRAME_T(2), r6
- gettr tr3, r6
- st.q SP, FRAME_T(3), r6
- gettr tr4, r6
- st.q SP, FRAME_T(4), r6
- gettr tr5, r6
- st.q SP, FRAME_T(5), r6
- gettr tr6, r6
- st.q SP, FRAME_T(6), r6
- gettr tr7, r6
- st.q SP, FRAME_T(7), r6
-
- ! setup FP so that unwinder can wind back through nested kernel mode
- ! exceptions
- add SP, ZERO, r14
-
- /* For syscall and debug race condition, get TRA now */
- getcon TRA, r5
-
- /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
- * Also set FD, to catch FPU usage in the kernel.
- *
- * benedict.gaster@superh.com 29/07/2002
- *
- * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
- * same time change BL from 1->0, as any pending interrupt of a level
- * higher than he previous value of IMASK will leak through and be
- * taken unexpectedly.
- *
- * To avoid this we raise the IMASK and then issue another PUTCON to
- * enable interrupts.
- */
- getcon SR, r6
- movi SR_IMASK | SR_FD, r7
- or r6, r7, r6
- putcon r6, SR
- movi SR_UNBLOCK_EXC, r7
- and r6, r7, r6
- putcon r6, SR
-
-
- /* Now call the appropriate 3rd level handler */
- or r3, ZERO, LINK
- movi trap_jtable, r3
- shlri r2, 3, r2
- ldx.l r2, r3, r3
- shlri r2, 2, r2
- ptabs r3, tr0
- or SP, ZERO, r3
- blink tr0, ZERO
-
-/*
- * Second level handler for VBR-based exceptions. Post-handlers.
- *
- * Post-handlers for interrupts (ret_from_irq), exceptions
- * (ret_from_exception) and common reentrance doors (restore_all
- * to get back to the original context, ret_from_syscall loop to
- * check kernel exiting).
- *
- * ret_with_reschedule and work_notifysig are an inner lables of
- * the ret_from_syscall loop.
- *
- * In common to all stack-frame sensitive handlers.
- *
- * Inputs:
- * (SP) struct pt_regs *, original register's frame pointer (basic)
- *
- */
- .global ret_from_irq
-ret_from_irq:
- ld.q SP, FRAME_S(FSSR), r6
- shlri r6, 30, r6
- andi r6, 1, r6
- pta resume_kernel, tr0
- bne r6, ZERO, tr0 /* no further checks */
- STI()
- pta ret_with_reschedule, tr0
- blink tr0, ZERO /* Do not check softirqs */
-
- .global ret_from_exception
-ret_from_exception:
- preempt_stop()
-
- ld.q SP, FRAME_S(FSSR), r6
- shlri r6, 30, r6
- andi r6, 1, r6
- pta resume_kernel, tr0
- bne r6, ZERO, tr0 /* no further checks */
-
- /* Check softirqs */
-
-#ifdef CONFIG_PREEMPTION
- pta ret_from_syscall, tr0
- blink tr0, ZERO
-
-resume_kernel:
- CLI()
-
- pta restore_all, tr0
-
- getcon KCR0, r6
- ld.l r6, TI_PRE_COUNT, r7
- beq/u r7, ZERO, tr0
-
-need_resched:
- ld.l r6, TI_FLAGS, r7
- movi (1 << TIF_NEED_RESCHED), r8
- and r8, r7, r8
- bne r8, ZERO, tr0
-
- getcon SR, r7
- andi r7, 0xf0, r7
- bne r7, ZERO, tr0
-
- movi preempt_schedule_irq, r7
- ori r7, 1, r7
- ptabs r7, tr1
- blink tr1, LINK
-
- pta need_resched, tr1
- blink tr1, ZERO
-#endif
-
- .global ret_from_syscall
-ret_from_syscall:
-
-ret_with_reschedule:
- getcon KCR0, r6 ! r6 contains current_thread_info
- ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
-
- movi _TIF_NEED_RESCHED, r8
- and r8, r7, r8
- pta work_resched, tr0
- bne r8, ZERO, tr0
-
- pta restore_all, tr1
-
- movi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8
- and r8, r7, r8
- pta work_notifysig, tr0
- bne r8, ZERO, tr0
-
- blink tr1, ZERO
-
-work_resched:
- pta ret_from_syscall, tr0
- gettr tr0, LINK
- movi schedule, r6
- ptabs r6, tr0
- blink tr0, ZERO /* Call schedule(), return on top */
-
-work_notifysig:
- gettr tr1, LINK
-
- movi do_notify_resume, r6
- ptabs r6, tr0
- or SP, ZERO, r2
- or r7, ZERO, r3
- blink tr0, LINK /* Call do_notify_resume(regs, current_thread_info->flags), return here */
-
-restore_all:
- /* Do prefetches */
-
- ld.q SP, FRAME_T(0), r6
- ld.q SP, FRAME_T(1), r7
- ld.q SP, FRAME_T(2), r8
- ld.q SP, FRAME_T(3), r9
- ptabs r6, tr0
- ptabs r7, tr1
- ptabs r8, tr2
- ptabs r9, tr3
- ld.q SP, FRAME_T(4), r6
- ld.q SP, FRAME_T(5), r7
- ld.q SP, FRAME_T(6), r8
- ld.q SP, FRAME_T(7), r9
- ptabs r6, tr4
- ptabs r7, tr5
- ptabs r8, tr6
- ptabs r9, tr7
-
- ld.q SP, FRAME_R(0), r0
- ld.q SP, FRAME_R(1), r1
- ld.q SP, FRAME_R(2), r2
- ld.q SP, FRAME_R(3), r3
- ld.q SP, FRAME_R(4), r4
- ld.q SP, FRAME_R(5), r5
- ld.q SP, FRAME_R(6), r6
- ld.q SP, FRAME_R(7), r7
- ld.q SP, FRAME_R(8), r8
- ld.q SP, FRAME_R(9), r9
- ld.q SP, FRAME_R(10), r10
- ld.q SP, FRAME_R(11), r11
- ld.q SP, FRAME_R(12), r12
- ld.q SP, FRAME_R(13), r13
- ld.q SP, FRAME_R(14), r14
-
- ld.q SP, FRAME_R(16), r16
- ld.q SP, FRAME_R(17), r17
- ld.q SP, FRAME_R(18), r18
- ld.q SP, FRAME_R(19), r19
- ld.q SP, FRAME_R(20), r20
- ld.q SP, FRAME_R(21), r21
- ld.q SP, FRAME_R(22), r22
- ld.q SP, FRAME_R(23), r23
- ld.q SP, FRAME_R(24), r24
- ld.q SP, FRAME_R(25), r25
- ld.q SP, FRAME_R(26), r26
- ld.q SP, FRAME_R(27), r27
- ld.q SP, FRAME_R(28), r28
- ld.q SP, FRAME_R(29), r29
- ld.q SP, FRAME_R(30), r30
- ld.q SP, FRAME_R(31), r31
- ld.q SP, FRAME_R(32), r32
- ld.q SP, FRAME_R(33), r33
- ld.q SP, FRAME_R(34), r34
- ld.q SP, FRAME_R(35), r35
- ld.q SP, FRAME_R(36), r36
- ld.q SP, FRAME_R(37), r37
- ld.q SP, FRAME_R(38), r38
- ld.q SP, FRAME_R(39), r39
- ld.q SP, FRAME_R(40), r40
- ld.q SP, FRAME_R(41), r41
- ld.q SP, FRAME_R(42), r42
- ld.q SP, FRAME_R(43), r43
- ld.q SP, FRAME_R(44), r44
- ld.q SP, FRAME_R(45), r45
- ld.q SP, FRAME_R(46), r46
- ld.q SP, FRAME_R(47), r47
- ld.q SP, FRAME_R(48), r48
- ld.q SP, FRAME_R(49), r49
- ld.q SP, FRAME_R(50), r50
- ld.q SP, FRAME_R(51), r51
- ld.q SP, FRAME_R(52), r52
- ld.q SP, FRAME_R(53), r53
- ld.q SP, FRAME_R(54), r54
- ld.q SP, FRAME_R(55), r55
- ld.q SP, FRAME_R(56), r56
- ld.q SP, FRAME_R(57), r57
- ld.q SP, FRAME_R(58), r58
-
- getcon SR, r59
- movi SR_BLOCK_EXC, r60
- or r59, r60, r59
- putcon r59, SR /* SR.BL = 1, keep nesting out */
- ld.q SP, FRAME_S(FSSR), r61
- ld.q SP, FRAME_S(FSPC), r62
- movi SR_ASID_MASK, r60
- and r59, r60, r59
- andc r61, r60, r61 /* Clear out older ASID */
- or r59, r61, r61 /* Retain current ASID */
- putcon r61, SSR
- putcon r62, SPC
-
- /* Ignore FSYSCALL_ID */
-
- ld.q SP, FRAME_R(59), r59
- ld.q SP, FRAME_R(60), r60
- ld.q SP, FRAME_R(61), r61
- ld.q SP, FRAME_R(62), r62
-
- /* Last touch */
- ld.q SP, FRAME_R(15), SP
- rte
- nop
-
-/*
- * Third level handlers for VBR-based exceptions. Adapting args to
- * and/or deflecting to fourth level handlers.
- *
- * Fourth level handlers interface.
- * Most are C-coded handlers directly pointed by the trap_jtable.
- * (Third = Fourth level)
- * Inputs:
- * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
- * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
- * (r3) struct pt_regs *, original register's frame pointer
- * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
- * (r5) TRA control register (for syscall/debug benefit only)
- * (LINK) return address
- * (SP) = r3
- *
- * Kernel TLB fault handlers will get a slightly different interface.
- * (r2) struct pt_regs *, original register's frame pointer
- * (r3) page fault error code (see asm/thread_info.h)
- * (r4) Effective Address of fault
- * (LINK) return address
- * (SP) = r2
- *
- * fpu_error_or_IRQ? is a helper to deflect to the right cause.
- *
- */
-#ifdef CONFIG_MMU
-tlb_miss_load:
- or SP, ZERO, r2
- or ZERO, ZERO, r3 /* Read */
- getcon TEA, r4
- pta call_do_page_fault, tr0
- beq ZERO, ZERO, tr0
-
-tlb_miss_store:
- or SP, ZERO, r2
- movi FAULT_CODE_WRITE, r3 /* Write */
- getcon TEA, r4
- pta call_do_page_fault, tr0
- beq ZERO, ZERO, tr0
-
-itlb_miss_or_IRQ:
- pta its_IRQ, tr0
- beqi/u r4, EVENT_INTERRUPT, tr0
-
- /* ITLB miss */
- or SP, ZERO, r2
- movi FAULT_CODE_ITLB, r3
- getcon TEA, r4
- /* Fall through */
-
-call_do_page_fault:
- movi do_page_fault, r6
- ptabs r6, tr0
- blink tr0, ZERO
-#endif /* CONFIG_MMU */
-
-fpu_error_or_IRQA:
- pta its_IRQ, tr0
- beqi/l r4, EVENT_INTERRUPT, tr0
-#ifdef CONFIG_SH_FPU
- movi fpu_state_restore_trap_handler, r6
-#else
- movi do_exception_error, r6
-#endif
- ptabs r6, tr0
- blink tr0, ZERO
-
-fpu_error_or_IRQB:
- pta its_IRQ, tr0
- beqi/l r4, EVENT_INTERRUPT, tr0
-#ifdef CONFIG_SH_FPU
- movi fpu_state_restore_trap_handler, r6
-#else
- movi do_exception_error, r6
-#endif
- ptabs r6, tr0
- blink tr0, ZERO
-
-its_IRQ:
- movi do_IRQ, r6
- ptabs r6, tr0
- blink tr0, ZERO
-
-/*
- * system_call/unknown_trap third level handler:
- *
- * Inputs:
- * (r2) fault/interrupt code, entry number (TRAP = 11)
- * (r3) struct pt_regs *, original register's frame pointer
- * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
- * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
- * (SP) = r3
- * (LINK) return address: ret_from_exception
- * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
- *
- * Outputs:
- * (*r3) Syscall reply (Saved r2)
- * (LINK) In case of syscall only it can be scrapped.
- * Common second level post handler will be ret_from_syscall.
- * Common (non-trace) exit point to that is syscall_ret (saving
- * result to r2). Common bad exit point is syscall_bad (returning
- * ENOSYS then saved to r2).
- *
- */
-
-unknown_trap:
- /* Unknown Trap or User Trace */
- movi do_unknown_trapa, r6
- ptabs r6, tr0
- ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
- andi r2, 0x1ff, r2 /* r2 = syscall # */
- blink tr0, LINK
-
- pta syscall_ret, tr0
- blink tr0, ZERO
-
- /* New syscall implementation*/
-system_call:
- pta unknown_trap, tr0
- or r5, ZERO, r4 /* TRA (=r5) -> r4 */
- shlri r4, 20, r4
- bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
-
- /* It's a system call */
- st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
- andi r5, 0x1ff, r5 /* syscall # -> r5 */
-
- STI()
-
- pta syscall_allowed, tr0
- movi NR_syscalls - 1, r4 /* Last valid */
- bgeu/l r4, r5, tr0
-
-syscall_bad:
- /* Return ENOSYS ! */
- movi -(ENOSYS), r2 /* Fall-through */
-
- .global syscall_ret
-syscall_ret:
- st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
- ld.q SP, FRAME_S(FSPC), r2
- addi r2, 4, r2 /* Move PC, being pre-execution event */
- st.q SP, FRAME_S(FSPC), r2
- pta ret_from_syscall, tr0
- blink tr0, ZERO
-
-
-/* A different return path for ret_from_fork, because we now need
- * to call schedule_tail with the later kernels. Because prev is
- * loaded into r2 by switch_to() means we can just call it straight away
- */
-
-.global ret_from_fork
-ret_from_fork:
-
- movi schedule_tail,r5
- ori r5, 1, r5
- ptabs r5, tr0
- blink tr0, LINK
-
- ld.q SP, FRAME_S(FSPC), r2
- addi r2, 4, r2 /* Move PC, being pre-execution event */
- st.q SP, FRAME_S(FSPC), r2
- pta ret_from_syscall, tr0
- blink tr0, ZERO
-
-.global ret_from_kernel_thread
-ret_from_kernel_thread:
-
- movi schedule_tail,r5
- ori r5, 1, r5
- ptabs r5, tr0
- blink tr0, LINK
-
- ld.q SP, FRAME_R(2), r2
- ld.q SP, FRAME_R(3), r3
- ptabs r3, tr0
- blink tr0, LINK
-
- ld.q SP, FRAME_S(FSPC), r2
- addi r2, 4, r2 /* Move PC, being pre-execution event */
- st.q SP, FRAME_S(FSPC), r2
- pta ret_from_syscall, tr0
- blink tr0, ZERO
-
-syscall_allowed:
- /* Use LINK to deflect the exit point, default is syscall_ret */
- pta syscall_ret, tr0
- gettr tr0, LINK
- pta syscall_notrace, tr0
-
- getcon KCR0, r2
- ld.l r2, TI_FLAGS, r4
- movi _TIF_WORK_SYSCALL_MASK, r6
- and r6, r4, r6
- beq/l r6, ZERO, tr0
-
- /* Trace it by calling syscall_trace before and after */
- movi do_syscall_trace_enter, r4
- or SP, ZERO, r2
- ptabs r4, tr0
- blink tr0, LINK
-
- /* Save the retval */
- st.q SP, FRAME_R(2), r2
-
- /* Reload syscall number as r5 is trashed by do_syscall_trace_enter */
- ld.q SP, FRAME_S(FSYSCALL_ID), r5
- andi r5, 0x1ff, r5
-
- pta syscall_ret_trace, tr0
- gettr tr0, LINK
-
-syscall_notrace:
- /* Now point to the appropriate 4th level syscall handler */
- movi sys_call_table, r4
- shlli r5, 2, r5
- ldx.l r4, r5, r5
- ptabs r5, tr0
-
- /* Prepare original args */
- ld.q SP, FRAME_R(2), r2
- ld.q SP, FRAME_R(3), r3
- ld.q SP, FRAME_R(4), r4
- ld.q SP, FRAME_R(5), r5
- ld.q SP, FRAME_R(6), r6
- ld.q SP, FRAME_R(7), r7
-
- /* And now the trick for those syscalls requiring regs * ! */
- or SP, ZERO, r8
-
- /* Call it */
- blink tr0, ZERO /* LINK is already properly set */
-
-syscall_ret_trace:
- /* We get back here only if under trace */
- st.q SP, FRAME_R(9), r2 /* Save return value */
-
- movi do_syscall_trace_leave, LINK
- or SP, ZERO, r2
- ptabs LINK, tr0
- blink tr0, LINK
-
- /* This needs to be done after any syscall tracing */
- ld.q SP, FRAME_S(FSPC), r2
- addi r2, 4, r2 /* Move PC, being pre-execution event */
- st.q SP, FRAME_S(FSPC), r2
-
- pta ret_from_syscall, tr0
- blink tr0, ZERO /* Resume normal return sequence */
-
-/*
- * --- Switch to running under a particular ASID and return the previous ASID value
- * --- The caller is assumed to have done a cli before calling this.
- *
- * Input r2 : new ASID
- * Output r2 : old ASID
- */
-
- .global switch_and_save_asid
-switch_and_save_asid:
- getcon sr, r0
- movi 255, r4
- shlli r4, 16, r4 /* r4 = mask to select ASID */
- and r0, r4, r3 /* r3 = shifted old ASID */
- andi r2, 255, r2 /* mask down new ASID */
- shlli r2, 16, r2 /* align new ASID against SR.ASID */
- andc r0, r4, r0 /* efface old ASID from SR */
- or r0, r2, r0 /* insert the new ASID */
- putcon r0, ssr
- movi 1f, r0
- putcon r0, spc
- rte
- nop
-1:
- ptabs LINK, tr0
- shlri r3, 16, r2 /* r2 = old ASID */
- blink tr0, r63
-
- .global route_to_panic_handler
-route_to_panic_handler:
- /* Switch to real mode, goto panic_handler, don't return. Useful for
- last-chance debugging, e.g. if no output wants to go to the console.
- */
-
- movi panic_handler - CONFIG_PAGE_OFFSET, r1
- ptabs r1, tr0
- pta 1f, tr1
- gettr tr1, r0
- putcon r0, spc
- getcon sr, r0
- movi 1, r1
- shlli r1, 31, r1
- andc r0, r1, r0
- putcon r0, ssr
- rte
- nop
-1: /* Now in real mode */
- blink tr0, r63
- nop
-
- .global peek_real_address_q
-peek_real_address_q:
- /* Two args:
- r2 : real mode address to peek
- r2(out) : result quadword
-
- This is provided as a cheapskate way of manipulating device
- registers for debugging (to avoid the need to ioremap the debug
- module, and to avoid the need to ioremap the watchpoint
- controller in a way that identity maps sufficient bits to avoid the
- SH5-101 cut2 silicon defect).
-
- This code is not performance critical
- */
-
- add.l r2, r63, r2 /* sign extend address */
- getcon sr, r0 /* r0 = saved original SR */
- movi 1, r1
- shlli r1, 28, r1
- or r0, r1, r1 /* r0 with block bit set */
- putcon r1, sr /* now in critical section */
- movi 1, r36
- shlli r36, 31, r36
- andc r1, r36, r1 /* turn sr.mmu off in real mode section */
-
- putcon r1, ssr
- movi .peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
- movi 1f, r37 /* virtual mode return addr */
- putcon r36, spc
-
- synco
- rte
- nop
-
-.peek0: /* come here in real mode, don't touch caches!!
- still in critical section (sr.bl==1) */
- putcon r0, ssr
- putcon r37, spc
- /* Here's the actual peek. If the address is bad, all bets are now off
- * what will happen (handlers invoked in real-mode = bad news) */
- ld.q r2, 0, r2
- synco
- rte /* Back to virtual mode */
- nop
-
-1:
- ptabs LINK, tr0
- blink tr0, r63
-
- .global poke_real_address_q
-poke_real_address_q:
- /* Two args:
- r2 : real mode address to poke
- r3 : quadword value to write.
-
- This is provided as a cheapskate way of manipulating device
- registers for debugging (to avoid the need to ioremap the debug
- module, and to avoid the need to ioremap the watchpoint
- controller in a way that identity maps sufficient bits to avoid the
- SH5-101 cut2 silicon defect).
-
- This code is not performance critical
- */
-
- add.l r2, r63, r2 /* sign extend address */
- getcon sr, r0 /* r0 = saved original SR */
- movi 1, r1
- shlli r1, 28, r1
- or r0, r1, r1 /* r0 with block bit set */
- putcon r1, sr /* now in critical section */
- movi 1, r36
- shlli r36, 31, r36
- andc r1, r36, r1 /* turn sr.mmu off in real mode section */
-
- putcon r1, ssr
- movi .poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
- movi 1f, r37 /* virtual mode return addr */
- putcon r36, spc
-
- synco
- rte
- nop
-
-.poke0: /* come here in real mode, don't touch caches!!
- still in critical section (sr.bl==1) */
- putcon r0, ssr
- putcon r37, spc
- /* Here's the actual poke. If the address is bad, all bets are now off
- * what will happen (handlers invoked in real-mode = bad news) */
- st.q r2, 0, r3
- synco
- rte /* Back to virtual mode */
- nop
-
-1:
- ptabs LINK, tr0
- blink tr0, r63
-
-#ifdef CONFIG_MMU
-/*
- * --- User Access Handling Section
- */
-
-/*
- * User Access support. It all moved to non inlined Assembler
- * functions in here.
- *
- * __kernel_size_t __copy_user(void *__to, const void *__from,
- * __kernel_size_t __n)
- *
- * Inputs:
- * (r2) target address
- * (r3) source address
- * (r4) size in bytes
- *
- * Ouputs:
- * (*r2) target data
- * (r2) non-copied bytes
- *
- * If a fault occurs on the user pointer, bail out early and return the
- * number of bytes not copied in r2.
- * Strategy : for large blocks, call a real memcpy function which can
- * move >1 byte at a time using unaligned ld/st instructions, and can
- * manipulate the cache using prefetch + alloco to improve the speed
- * further. If a fault occurs in that function, just revert to the
- * byte-by-byte approach used for small blocks; this is rare so the
- * performance hit for that case does not matter.
- *
- * For small blocks it's not worth the overhead of setting up and calling
- * the memcpy routine; do the copy a byte at a time.
- *
- */
- .global __copy_user
-__copy_user:
- pta __copy_user_byte_by_byte, tr1
- movi 16, r0 ! this value is a best guess, should tune it by benchmarking
- bge/u r0, r4, tr1
- pta copy_user_memcpy, tr0
- addi SP, -32, SP
- /* Save arguments in case we have to fix-up unhandled page fault */
- st.q SP, 0, r2
- st.q SP, 8, r3
- st.q SP, 16, r4
- st.q SP, 24, r35 ! r35 is callee-save
- /* Save LINK in a register to reduce RTS time later (otherwise
- ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
- ori LINK, 0, r35
- blink tr0, LINK
-
- /* Copy completed normally if we get back here */
- ptabs r35, tr0
- ld.q SP, 24, r35
- /* don't restore r2-r4, pointless */
- /* set result=r2 to zero as the copy must have succeeded. */
- or r63, r63, r2
- addi SP, 32, SP
- blink tr0, r63 ! RTS
-
- .global __copy_user_fixup
-__copy_user_fixup:
- /* Restore stack frame */
- ori r35, 0, LINK
- ld.q SP, 24, r35
- ld.q SP, 16, r4
- ld.q SP, 8, r3
- ld.q SP, 0, r2
- addi SP, 32, SP
- /* Fall through to original code, in the 'same' state we entered with */
-
-/* The slow byte-by-byte method is used if the fast copy traps due to a bad
- user address. In that rare case, the speed drop can be tolerated. */
-__copy_user_byte_by_byte:
- pta ___copy_user_exit, tr1
- pta ___copy_user1, tr0
- beq/u r4, r63, tr1 /* early exit for zero length copy */
- sub r2, r3, r0
- addi r0, -1, r0
-
-___copy_user1:
- ld.b r3, 0, r5 /* Fault address 1 */
-
- /* Could rewrite this to use just 1 add, but the second comes 'free'
- due to load latency */
- addi r3, 1, r3
- addi r4, -1, r4 /* No real fixup required */
-___copy_user2:
- stx.b r3, r0, r5 /* Fault address 2 */
- bne r4, ZERO, tr0
-
-___copy_user_exit:
- or r4, ZERO, r2
- ptabs LINK, tr0
- blink tr0, ZERO
-
-/*
- * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
- *
- * Inputs:
- * (r2) target address
- * (r3) size in bytes
- *
- * Ouputs:
- * (*r2) zero-ed target data
- * (r2) non-zero-ed bytes
- */
- .global __clear_user
-__clear_user:
- pta ___clear_user_exit, tr1
- pta ___clear_user1, tr0
- beq/u r3, r63, tr1
-
-___clear_user1:
- st.b r2, 0, ZERO /* Fault address */
- addi r2, 1, r2
- addi r3, -1, r3 /* No real fixup required */
- bne r3, ZERO, tr0
-
-___clear_user_exit:
- or r3, ZERO, r2
- ptabs LINK, tr0
- blink tr0, ZERO
-
-#endif /* CONFIG_MMU */
-
-/*
- * extern long __get_user_asm_?(void *val, long addr)
- *
- * Inputs:
- * (r2) dest address
- * (r3) source address (in User Space)
- *
- * Ouputs:
- * (r2) -EFAULT (faulting)
- * 0 (not faulting)
- */
- .global __get_user_asm_b
-__get_user_asm_b:
- or r2, ZERO, r4
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___get_user_asm_b1:
- ld.b r3, 0, r5 /* r5 = data */
- st.b r4, 0, r5
- or ZERO, ZERO, r2
-
-___get_user_asm_b_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __get_user_asm_w
-__get_user_asm_w:
- or r2, ZERO, r4
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___get_user_asm_w1:
- ld.w r3, 0, r5 /* r5 = data */
- st.w r4, 0, r5
- or ZERO, ZERO, r2
-
-___get_user_asm_w_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __get_user_asm_l
-__get_user_asm_l:
- or r2, ZERO, r4
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___get_user_asm_l1:
- ld.l r3, 0, r5 /* r5 = data */
- st.l r4, 0, r5
- or ZERO, ZERO, r2
-
-___get_user_asm_l_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __get_user_asm_q
-__get_user_asm_q:
- or r2, ZERO, r4
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___get_user_asm_q1:
- ld.q r3, 0, r5 /* r5 = data */
- st.q r4, 0, r5
- or ZERO, ZERO, r2
-
-___get_user_asm_q_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-/*
- * extern long __put_user_asm_?(void *pval, long addr)
- *
- * Inputs:
- * (r2) kernel pointer to value
- * (r3) dest address (in User Space)
- *
- * Ouputs:
- * (r2) -EFAULT (faulting)
- * 0 (not faulting)
- */
- .global __put_user_asm_b
-__put_user_asm_b:
- ld.b r2, 0, r4 /* r4 = data */
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___put_user_asm_b1:
- st.b r3, 0, r4
- or ZERO, ZERO, r2
-
-___put_user_asm_b_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __put_user_asm_w
-__put_user_asm_w:
- ld.w r2, 0, r4 /* r4 = data */
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___put_user_asm_w1:
- st.w r3, 0, r4
- or ZERO, ZERO, r2
-
-___put_user_asm_w_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __put_user_asm_l
-__put_user_asm_l:
- ld.l r2, 0, r4 /* r4 = data */
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___put_user_asm_l1:
- st.l r3, 0, r4
- or ZERO, ZERO, r2
-
-___put_user_asm_l_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-
- .global __put_user_asm_q
-__put_user_asm_q:
- ld.q r2, 0, r4 /* r4 = data */
- movi -(EFAULT), r2 /* r2 = reply, no real fixup */
-
-___put_user_asm_q1:
- st.q r3, 0, r4
- or ZERO, ZERO, r2
-
-___put_user_asm_q_exit:
- ptabs LINK, tr0
- blink tr0, ZERO
-
-panic_stash_regs:
- /* The idea is : when we get an unhandled panic, we dump the registers
- to a known memory location, the just sit in a tight loop.
- This allows the human to look at the memory region through the GDB
- session (assuming the debug module's SHwy initiator isn't locked up
- or anything), to hopefully analyze the cause of the panic. */
-
- /* On entry, former r15 (SP) is in DCR
- former r0 is at resvec_saved_area + 0
- former r1 is at resvec_saved_area + 8
- former tr0 is at resvec_saved_area + 32
- DCR is the only register whose value is lost altogether.
- */
-
- movi 0xffffffff80000000, r0 ! phy of dump area
- ld.q SP, 0x000, r1 ! former r0
- st.q r0, 0x000, r1
- ld.q SP, 0x008, r1 ! former r1
- st.q r0, 0x008, r1
- st.q r0, 0x010, r2
- st.q r0, 0x018, r3
- st.q r0, 0x020, r4
- st.q r0, 0x028, r5
- st.q r0, 0x030, r6
- st.q r0, 0x038, r7
- st.q r0, 0x040, r8
- st.q r0, 0x048, r9
- st.q r0, 0x050, r10
- st.q r0, 0x058, r11
- st.q r0, 0x060, r12
- st.q r0, 0x068, r13
- st.q r0, 0x070, r14
- getcon dcr, r14
- st.q r0, 0x078, r14
- st.q r0, 0x080, r16
- st.q r0, 0x088, r17
- st.q r0, 0x090, r18
- st.q r0, 0x098, r19
- st.q r0, 0x0a0, r20
- st.q r0, 0x0a8, r21
- st.q r0, 0x0b0, r22
- st.q r0, 0x0b8, r23
- st.q r0, 0x0c0, r24
- st.q r0, 0x0c8, r25
- st.q r0, 0x0d0, r26
- st.q r0, 0x0d8, r27
- st.q r0, 0x0e0, r28
- st.q r0, 0x0e8, r29
- st.q r0, 0x0f0, r30
- st.q r0, 0x0f8, r31
- st.q r0, 0x100, r32
- st.q r0, 0x108, r33
- st.q r0, 0x110, r34
- st.q r0, 0x118, r35
- st.q r0, 0x120, r36
- st.q r0, 0x128, r37
- st.q r0, 0x130, r38
- st.q r0, 0x138, r39
- st.q r0, 0x140, r40
- st.q r0, 0x148, r41
- st.q r0, 0x150, r42
- st.q r0, 0x158, r43
- st.q r0, 0x160, r44
- st.q r0, 0x168, r45
- st.q r0, 0x170, r46
- st.q r0, 0x178, r47
- st.q r0, 0x180, r48
- st.q r0, 0x188, r49
- st.q r0, 0x190, r50
- st.q r0, 0x198, r51
- st.q r0, 0x1a0, r52
- st.q r0, 0x1a8, r53
- st.q r0, 0x1b0, r54
- st.q r0, 0x1b8, r55
- st.q r0, 0x1c0, r56
- st.q r0, 0x1c8, r57
- st.q r0, 0x1d0, r58
- st.q r0, 0x1d8, r59
- st.q r0, 0x1e0, r60
- st.q r0, 0x1e8, r61
- st.q r0, 0x1f0, r62
- st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
-
- ld.q SP, 0x020, r1 ! former tr0
- st.q r0, 0x200, r1
- gettr tr1, r1
- st.q r0, 0x208, r1
- gettr tr2, r1
- st.q r0, 0x210, r1
- gettr tr3, r1
- st.q r0, 0x218, r1
- gettr tr4, r1
- st.q r0, 0x220, r1
- gettr tr5, r1
- st.q r0, 0x228, r1
- gettr tr6, r1
- st.q r0, 0x230, r1
- gettr tr7, r1
- st.q r0, 0x238, r1
-
- getcon sr, r1
- getcon ssr, r2
- getcon pssr, r3
- getcon spc, r4
- getcon pspc, r5
- getcon intevt, r6
- getcon expevt, r7
- getcon pexpevt, r8
- getcon tra, r9
- getcon tea, r10
- getcon kcr0, r11
- getcon kcr1, r12
- getcon vbr, r13
- getcon resvec, r14
-
- st.q r0, 0x240, r1
- st.q r0, 0x248, r2
- st.q r0, 0x250, r3
- st.q r0, 0x258, r4
- st.q r0, 0x260, r5
- st.q r0, 0x268, r6
- st.q r0, 0x270, r7
- st.q r0, 0x278, r8
- st.q r0, 0x280, r9
- st.q r0, 0x288, r10
- st.q r0, 0x290, r11
- st.q r0, 0x298, r12
- st.q r0, 0x2a0, r13
- st.q r0, 0x2a8, r14
-
- getcon SPC,r2
- getcon SSR,r3
- getcon EXPEVT,r4
- /* Prepare to jump to C - physical address */
- movi panic_handler-CONFIG_PAGE_OFFSET, r1
- ori r1, 1, r1
- ptabs r1, tr0
- getcon DCR, SP
- blink tr0, ZERO
- nop
- nop
- nop
- nop
-
-
-
-
-/*
- * --- Signal Handling Section
- */
-
-/*
- * extern long long _sa_default_rt_restorer
- * extern long long _sa_default_restorer
- *
- * or, better,
- *
- * extern void _sa_default_rt_restorer(void)
- * extern void _sa_default_restorer(void)
- *
- * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn()
- * from user space. Copied into user space by signal management.
- * Both must be quad aligned and 2 quad long (4 instructions).
- *
- */
- .balign 8
- .global sa_default_rt_restorer
-sa_default_rt_restorer:
- movi 0x10, r9
- shori __NR_rt_sigreturn, r9
- trapa r9
- nop
-
- .balign 8
- .global sa_default_restorer
-sa_default_restorer:
- movi 0x10, r9
- shori __NR_sigreturn, r9
- trapa r9
- nop
-
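Each trampoline above has only two meaningful instructions, which simply build the trap operand in r9. A stand-alone sketch of that arithmetic, assuming the usual SHmedia semantics (movi loads a 16-bit immediate, shori shifts the destination left by 16 and ORs in an immediate); the syscall number below is a placeholder, not the real __NR_rt_sigreturn value:

#include <stdio.h>

#define NR_RT_SIGRETURN_PLACEHOLDER 0x0ad	/* illustrative value only */

int main(void)
{
	unsigned int r9 = 0x10;				/* movi  0x10, r9 */
	r9 = (r9 << 16) | NR_RT_SIGRETURN_PLACEHOLDER;	/* shori __NR_rt_sigreturn, r9 */
	printf("trapa operand: 0x%x\n", r9);		/* 0x10 in the upper half, syscall number below */
	return 0;
}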
-/*
- * --- __ex_table Section
- */
-
-/*
- * User Access Exception Table.
- */
- .section __ex_table, "a"
-
- .global asm_uaccess_start /* Just a marker */
-asm_uaccess_start:
-
-#ifdef CONFIG_MMU
- .long ___copy_user1, ___copy_user_exit
- .long ___copy_user2, ___copy_user_exit
- .long ___clear_user1, ___clear_user_exit
-#endif
- .long ___get_user_asm_b1, ___get_user_asm_b_exit
- .long ___get_user_asm_w1, ___get_user_asm_w_exit
- .long ___get_user_asm_l1, ___get_user_asm_l_exit
- .long ___get_user_asm_q1, ___get_user_asm_q_exit
- .long ___put_user_asm_b1, ___put_user_asm_b_exit
- .long ___put_user_asm_w1, ___put_user_asm_w_exit
- .long ___put_user_asm_l1, ___put_user_asm_l_exit
- .long ___put_user_asm_q1, ___put_user_asm_q_exit
-
- .global asm_uaccess_end /* Just a marker */
-asm_uaccess_end:
-
-
-
-
-/*
- * --- .init.text Section
- */
-
- __INIT
-
-/*
- * void trap_init (void)
- *
- */
- .global trap_init
-trap_init:
- addi SP, -24, SP /* Room to save r28/r29/r30 */
- st.q SP, 0, r28
- st.q SP, 8, r29
- st.q SP, 16, r30
-
- /* Set VBR and RESVEC */
- movi LVBR_block, r19
- andi r19, -4, r19 /* reset MMUOFF + reserved */
- /* For RESVEC exceptions we force the MMU off, which means we need the
- physical address. */
- movi LRESVEC_block-CONFIG_PAGE_OFFSET, r20
- andi r20, -4, r20 /* reset reserved */
- ori r20, 1, r20 /* set MMUOFF */
- putcon r19, VBR
- putcon r20, RESVEC
-
- /* Sanity check */
- movi LVBR_block_end, r21
- andi r21, -4, r21
- movi BLOCK_SIZE, r29 /* r29 = expected size */
- or r19, ZERO, r30
- add r19, r29, r19
-
- /*
- * Ugly, but better to loop forever now than crash afterwards.
- * We should print a message, but if we touch LVBR or
- * LRESVEC blocks we should not be surprised if we get stuck
- * in trap_init().
- */
- pta trap_init_loop, tr1
- gettr tr1, r28 /* r28 = trap_init_loop */
- sub r21, r30, r30 /* r30 = actual size */
-
- /*
- * VBR/RESVEC handlers overlap by being bigger than
- * allowed. Very bad. Just loop forever.
- * (r28) panic/loop address
- * (r29) expected size
- * (r30) actual size
- */
-trap_init_loop:
- bne r19, r21, tr1
-
- /* Now that exception vectors are set up reset SR.BL */
- getcon SR, r22
- movi SR_UNBLOCK_EXC, r23
- and r22, r23, r22
- putcon r22, SR
-
- addi SP, 24, SP
- ptabs LINK, tr0
- blink tr0, ZERO
-
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
deleted file mode 100644
index 3966b5ee8e93..000000000000
--- a/arch/sh/kernel/cpu/sh5/fpu.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/cpu/sh5/fpu.c
- *
- * Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli
- * Copyright (C) 2002 STMicroelectronics Limited
- * Author : Stuart Menefy
- *
- * Started from SH4 version:
- * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
- */
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <asm/processor.h>
-
-void save_fpu(struct task_struct *tsk)
-{
- asm volatile("fst.p %0, (0*8), fp0\n\t"
- "fst.p %0, (1*8), fp2\n\t"
- "fst.p %0, (2*8), fp4\n\t"
- "fst.p %0, (3*8), fp6\n\t"
- "fst.p %0, (4*8), fp8\n\t"
- "fst.p %0, (5*8), fp10\n\t"
- "fst.p %0, (6*8), fp12\n\t"
- "fst.p %0, (7*8), fp14\n\t"
- "fst.p %0, (8*8), fp16\n\t"
- "fst.p %0, (9*8), fp18\n\t"
- "fst.p %0, (10*8), fp20\n\t"
- "fst.p %0, (11*8), fp22\n\t"
- "fst.p %0, (12*8), fp24\n\t"
- "fst.p %0, (13*8), fp26\n\t"
- "fst.p %0, (14*8), fp28\n\t"
- "fst.p %0, (15*8), fp30\n\t"
- "fst.p %0, (16*8), fp32\n\t"
- "fst.p %0, (17*8), fp34\n\t"
- "fst.p %0, (18*8), fp36\n\t"
- "fst.p %0, (19*8), fp38\n\t"
- "fst.p %0, (20*8), fp40\n\t"
- "fst.p %0, (21*8), fp42\n\t"
- "fst.p %0, (22*8), fp44\n\t"
- "fst.p %0, (23*8), fp46\n\t"
- "fst.p %0, (24*8), fp48\n\t"
- "fst.p %0, (25*8), fp50\n\t"
- "fst.p %0, (26*8), fp52\n\t"
- "fst.p %0, (27*8), fp54\n\t"
- "fst.p %0, (28*8), fp56\n\t"
- "fst.p %0, (29*8), fp58\n\t"
- "fst.p %0, (30*8), fp60\n\t"
- "fst.p %0, (31*8), fp62\n\t"
-
- "fgetscr fr63\n\t"
- "fst.s %0, (32*8), fr63\n\t"
- : /* no output */
- : "r" (&tsk->thread.xstate->hardfpu)
- : "memory");
-}
-
-void restore_fpu(struct task_struct *tsk)
-{
- asm volatile("fld.p %0, (0*8), fp0\n\t"
- "fld.p %0, (1*8), fp2\n\t"
- "fld.p %0, (2*8), fp4\n\t"
- "fld.p %0, (3*8), fp6\n\t"
- "fld.p %0, (4*8), fp8\n\t"
- "fld.p %0, (5*8), fp10\n\t"
- "fld.p %0, (6*8), fp12\n\t"
- "fld.p %0, (7*8), fp14\n\t"
- "fld.p %0, (8*8), fp16\n\t"
- "fld.p %0, (9*8), fp18\n\t"
- "fld.p %0, (10*8), fp20\n\t"
- "fld.p %0, (11*8), fp22\n\t"
- "fld.p %0, (12*8), fp24\n\t"
- "fld.p %0, (13*8), fp26\n\t"
- "fld.p %0, (14*8), fp28\n\t"
- "fld.p %0, (15*8), fp30\n\t"
- "fld.p %0, (16*8), fp32\n\t"
- "fld.p %0, (17*8), fp34\n\t"
- "fld.p %0, (18*8), fp36\n\t"
- "fld.p %0, (19*8), fp38\n\t"
- "fld.p %0, (20*8), fp40\n\t"
- "fld.p %0, (21*8), fp42\n\t"
- "fld.p %0, (22*8), fp44\n\t"
- "fld.p %0, (23*8), fp46\n\t"
- "fld.p %0, (24*8), fp48\n\t"
- "fld.p %0, (25*8), fp50\n\t"
- "fld.p %0, (26*8), fp52\n\t"
- "fld.p %0, (27*8), fp54\n\t"
- "fld.p %0, (28*8), fp56\n\t"
- "fld.p %0, (29*8), fp58\n\t"
- "fld.p %0, (30*8), fp60\n\t"
-
- "fld.s %0, (32*8), fr63\n\t"
- "fputscr fr63\n\t"
-
- "fld.p %0, (31*8), fp62\n\t"
- : /* no output */
- : "r" (&tsk->thread.xstate->hardfpu)
- : "memory");
-}
-
-asmlinkage void do_fpu_error(unsigned long ex, struct pt_regs *regs)
-{
- regs->pc += 4;
-
- force_sig(SIGFPE);
-}
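The save/restore above writes 32 register pairs of 8 bytes each, then FPSCR at offset 32*8; on restore, fp62 is reloaded only after fputscr because fr63 is used as the scratch for FPSCR. A hypothetical C view of that save area, reconstructed from the offsets rather than taken from the kernel headers:

/* Hypothetical layout of the FPU save area implied by the fst.p/fst.s offsets. */
struct sh5_fpu_save_sketch {
	unsigned long long fp_pair[32];	/* (0*8)..(31*8): fp0, fp2, ..., fp62,
					 * one register pair per 8-byte slot */
	unsigned int fpscr;		/* (32*8): status/control word via fr63 */
};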
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
deleted file mode 100644
index 947250188065..000000000000
--- a/arch/sh/kernel/cpu/sh5/probe.c
+++ /dev/null
@@ -1,72 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/cpu/sh5/probe.c
- *
- * CPU Subtype Probing for SH-5.
- *
- * Copyright (C) 2000, 2001 Paolo Alberelli
- * Copyright (C) 2003 - 2007 Paul Mundt
- */
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/string.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/tlb.h>
-
-void cpu_probe(void)
-{
- unsigned long long cir;
-
- /*
- * Do peeks in real mode to avoid having to set up a mapping for
- * the WPC registers. On SH5-101 cut2, such a mapping would be
- * exposed to an address translation erratum which would make it
- * hard to set up correctly.
- */
- cir = peek_real_address_q(0x0d000008);
- if ((cir & 0xffff) == 0x5103)
- boot_cpu_data.type = CPU_SH5_103;
- else if (((cir >> 32) & 0xffff) == 0x51e2)
- /* CPU.VCR aliased at CIR address on SH5-101 */
- boot_cpu_data.type = CPU_SH5_101;
-
- boot_cpu_data.family = CPU_FAMILY_SH5;
-
- /*
- * First, setup some sane values for the I-cache.
- */
- boot_cpu_data.icache.ways = 4;
- boot_cpu_data.icache.sets = 256;
- boot_cpu_data.icache.linesz = L1_CACHE_BYTES;
- boot_cpu_data.icache.way_incr = (1 << 13);
- boot_cpu_data.icache.entry_shift = 5;
- boot_cpu_data.icache.way_size = boot_cpu_data.icache.sets *
- boot_cpu_data.icache.linesz;
- boot_cpu_data.icache.entry_mask = 0x1fe0;
- boot_cpu_data.icache.flags = 0;
-
- /*
- * Next, setup some sane values for the D-cache.
- *
- * On the SH5, these are pretty consistent with the I-cache settings,
- * so we just copy over the existing definitions. These can be fixed
- * up later, especially if we add runtime CPU probing.
- *
- * In the meantime, this saves us from having to duplicate all of
- * the above definitions.
- */
- boot_cpu_data.dcache = boot_cpu_data.icache;
-
- /*
- * Setup any cache-related flags here
- */
-#if defined(CONFIG_CACHE_WRITETHROUGH)
- set_bit(SH_CACHE_MODE_WT, &(boot_cpu_data.dcache.flags));
-#elif defined(CONFIG_CACHE_WRITEBACK)
- set_bit(SH_CACHE_MODE_WB, &(boot_cpu_data.dcache.flags));
-#endif
-
- /* Setup some I/D TLB defaults */
- sh64_tlb_init();
-}
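The I-cache geometry set above is internally consistent, assuming entry_shift == 5 means a 32-byte line (i.e. L1_CACHE_BYTES == 32 here). A small stand-alone check of the arithmetic:

#include <assert.h>

int main(void)
{
	unsigned int ways = 4, sets = 256, linesz = 32;
	unsigned int way_size = sets * linesz;		/* 8192 bytes per way */

	assert(way_size == (1u << 13));			/* matches way_incr */
	assert(way_size - linesz == 0x1fe0);		/* matches entry_mask (bits 5..12) */
	assert(ways * way_size == 32 * 1024);		/* 32 KiB per cache */
	return 0;
}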
diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c
deleted file mode 100644
index dc8476d67244..000000000000
--- a/arch/sh/kernel/cpu/sh5/setup-sh5.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SH5-101/SH5-103 CPU Setup
- *
- * Copyright (C) 2009 Paul Mundt
- */
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial.h>
-#include <linux/serial_sci.h>
-#include <linux/io.h>
-#include <linux/mm.h>
-#include <linux/sh_timer.h>
-#include <asm/addrspace.h>
-#include <asm/platform_early.h>
-
-static struct plat_sci_port scif0_platform_data = {
- .flags = UPF_IOREMAP,
- .scscr = SCSCR_REIE,
- .type = PORT_SCIF,
-};
-
-static struct resource scif0_resources[] = {
- DEFINE_RES_MEM(PHYS_PERIPHERAL_BLOCK + 0x01030000, 0x100),
- DEFINE_RES_IRQ(39),
- DEFINE_RES_IRQ(40),
- DEFINE_RES_IRQ(42),
-};
-
-static struct platform_device scif0_device = {
- .name = "sh-sci",
- .id = 0,
- .resource = scif0_resources,
- .num_resources = ARRAY_SIZE(scif0_resources),
- .dev = {
- .platform_data = &scif0_platform_data,
- },
-};
-
-static struct resource rtc_resources[] = {
- [0] = {
- .start = PHYS_PERIPHERAL_BLOCK + 0x01040000,
- .end = PHYS_PERIPHERAL_BLOCK + 0x01040000 + 0x58 - 1,
- .flags = IORESOURCE_IO,
- },
- [1] = {
- /* Period IRQ */
- .start = IRQ_PRI,
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- /* Carry IRQ */
- .start = IRQ_CUI,
- .flags = IORESOURCE_IRQ,
- },
- [3] = {
- /* Alarm IRQ */
- .start = IRQ_ATI,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device rtc_device = {
- .name = "sh-rtc",
- .id = -1,
- .num_resources = ARRAY_SIZE(rtc_resources),
- .resource = rtc_resources,
-};
-
-#define TMU_BLOCK_OFF 0x01020000
-#define TMU_BASE	(PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF)
-
-static struct sh_timer_config tmu0_platform_data = {
- .channels_mask = 7,
-};
-
-static struct resource tmu0_resources[] = {
- DEFINE_RES_MEM(TMU_BASE, 0x30),
- DEFINE_RES_IRQ(IRQ_TUNI0),
- DEFINE_RES_IRQ(IRQ_TUNI1),
- DEFINE_RES_IRQ(IRQ_TUNI2),
-};
-
-static struct platform_device tmu0_device = {
- .name = "sh-tmu",
- .id = 0,
- .dev = {
- .platform_data = &tmu0_platform_data,
- },
- .resource = tmu0_resources,
- .num_resources = ARRAY_SIZE(tmu0_resources),
-};
-
-static struct platform_device *sh5_early_devices[] __initdata = {
- &scif0_device,
- &tmu0_device,
-};
-
-static struct platform_device *sh5_devices[] __initdata = {
- &rtc_device,
-};
-
-static int __init sh5_devices_setup(void)
-{
- int ret;
-
- ret = platform_add_devices(sh5_early_devices,
- ARRAY_SIZE(sh5_early_devices));
- if (unlikely(ret != 0))
- return ret;
-
- return platform_add_devices(sh5_devices,
- ARRAY_SIZE(sh5_devices));
-}
-arch_initcall(sh5_devices_setup);
-
-void __init plat_early_device_setup(void)
-{
- sh_early_platform_add_devices(sh5_early_devices,
- ARRAY_SIZE(sh5_early_devices));
-}
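The DEFINE_RES_MEM()/DEFINE_RES_IRQ() entries above are shorthand for filling in struct resource. As a sketch of what the first scif0 entry expands to (my expansion, worth checking against <linux/ioport.h>):

/* Equivalent explicit form of DEFINE_RES_MEM(PHYS_PERIPHERAL_BLOCK + 0x01030000, 0x100). */
static struct resource scif0_mem_sketch = {
	.start	= PHYS_PERIPHERAL_BLOCK + 0x01030000,
	.end	= PHYS_PERIPHERAL_BLOCK + 0x01030000 + 0x100 - 1,
	.flags	= IORESOURCE_MEM,
};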
diff --git a/arch/sh/kernel/cpu/sh5/switchto.S b/arch/sh/kernel/cpu/sh5/switchto.S
deleted file mode 100644
index d1beff755632..000000000000
--- a/arch/sh/kernel/cpu/sh5/switchto.S
+++ /dev/null
@@ -1,195 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * arch/sh/kernel/cpu/sh5/switchto.S
- *
- * sh64 context switch
- *
- * Copyright (C) 2004 Richard Curnow
-*/
-
- .section .text..SHmedia32,"ax"
- .little
-
- .balign 32
-
- .type sh64_switch_to,@function
- .global sh64_switch_to
- .global __sh64_switch_to_end
-sh64_switch_to:
-
-/* Incoming args
- r2 - prev
- r3 - &prev->thread
- r4 - next
- r5 - &next->thread
-
- Outgoing results
- r2 - last (=prev) : this just stays in r2 throughout
-
- Want to create a full (struct pt_regs) on the stack to allow backtracing
- functions to work. However, we only need to populate the callee-save
- register slots in this structure; since we're a function our ancestors must
- register slots in this structure; since we're a function, our ancestors must
- have themselves preserved all caller-saved state on the stack. This saves
-
- In particular, all caller-save registers are immediately available for
- scratch use.
-
-*/
-
-#define FRAME_SIZE (76*8 + 8)
-
- movi FRAME_SIZE, r0
- sub.l r15, r0, r15
- ! Do normal-style register save to support backtrace
-
- st.l r15, 0, r18 ! save link reg
- st.l r15, 4, r14 ! save fp
- add.l r15, r63, r14 ! setup frame pointer
-
- ! hopefully this looks normal to the backtrace now.
-
- addi.l r15, 8, r1 ! base of pt_regs
- addi.l r1, 24, r0 ! base of pt_regs.regs
- addi.l r0, (63*8), r8 ! base of pt_regs.trregs
-
- /* Note : to be fixed?
- struct pt_regs is really designed for holding the state on entry
- to an exception, i.e. pc,sr,regs etc. However, for the context
- switch state, some of this is not required. But the unwinder takes
- struct pt_regs * as an arg so we have to build this structure
- to allow unwinding switched tasks in show_state() */
-
- st.q r0, ( 9*8), r9
- st.q r0, (10*8), r10
- st.q r0, (11*8), r11
- st.q r0, (12*8), r12
- st.q r0, (13*8), r13
- st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
- ! the point where the process is left in suspended animation, i.e. current
- ! fp here, not the saved one.
- st.q r0, (16*8), r16
-
- st.q r0, (24*8), r24
- st.q r0, (25*8), r25
- st.q r0, (26*8), r26
- st.q r0, (27*8), r27
- st.q r0, (28*8), r28
- st.q r0, (29*8), r29
- st.q r0, (30*8), r30
- st.q r0, (31*8), r31
- st.q r0, (32*8), r32
- st.q r0, (33*8), r33
- st.q r0, (34*8), r34
- st.q r0, (35*8), r35
-
- st.q r0, (44*8), r44
- st.q r0, (45*8), r45
- st.q r0, (46*8), r46
- st.q r0, (47*8), r47
- st.q r0, (48*8), r48
- st.q r0, (49*8), r49
- st.q r0, (50*8), r50
- st.q r0, (51*8), r51
- st.q r0, (52*8), r52
- st.q r0, (53*8), r53
- st.q r0, (54*8), r54
- st.q r0, (55*8), r55
- st.q r0, (56*8), r56
- st.q r0, (57*8), r57
- st.q r0, (58*8), r58
- st.q r0, (59*8), r59
-
- ! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
-	! Use a local label to avoid creating a symbol that will confuse the
- ! backtrace
- pta .Lsave_pc, tr0
-
- gettr tr5, r45
- gettr tr6, r46
- gettr tr7, r47
- st.q r8, (5*8), r45
- st.q r8, (6*8), r46
- st.q r8, (7*8), r47
-
- ! Now switch context
- gettr tr0, r9
- st.l r3, 0, r15 ! prev->thread.sp
- st.l r3, 8, r1 ! prev->thread.kregs
- st.l r3, 4, r9 ! prev->thread.pc
- st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc
-
- ! Load PC for next task (init value or save_pc later)
- ld.l r5, 4, r18 ! next->thread.pc
- ! Switch stacks
- ld.l r5, 0, r15 ! next->thread.sp
- ptabs r18, tr0
-
- ! Update current
- ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct)
- putcon r9, kcr0 ! current = next->thread_info
-
- ! go to save_pc for a reschedule, or the initial thread.pc for a new process
- blink tr0, r63
-
- ! Restore (when we come back to a previously saved task)
-.Lsave_pc:
- addi.l r15, 32, r0 ! r0 = next's regs
- addi.l r0, (63*8), r8 ! r8 = next's tr_regs
-
- ld.q r8, (5*8), r45
- ld.q r8, (6*8), r46
- ld.q r8, (7*8), r47
- ptabs r45, tr5
- ptabs r46, tr6
- ptabs r47, tr7
-
- ld.q r0, ( 9*8), r9
- ld.q r0, (10*8), r10
- ld.q r0, (11*8), r11
- ld.q r0, (12*8), r12
- ld.q r0, (13*8), r13
- ld.q r0, (14*8), r14
- ld.q r0, (16*8), r16
-
- ld.q r0, (24*8), r24
- ld.q r0, (25*8), r25
- ld.q r0, (26*8), r26
- ld.q r0, (27*8), r27
- ld.q r0, (28*8), r28
- ld.q r0, (29*8), r29
- ld.q r0, (30*8), r30
- ld.q r0, (31*8), r31
- ld.q r0, (32*8), r32
- ld.q r0, (33*8), r33
- ld.q r0, (34*8), r34
- ld.q r0, (35*8), r35
-
- ld.q r0, (44*8), r44
- ld.q r0, (45*8), r45
- ld.q r0, (46*8), r46
- ld.q r0, (47*8), r47
- ld.q r0, (48*8), r48
- ld.q r0, (49*8), r49
- ld.q r0, (50*8), r50
- ld.q r0, (51*8), r51
- ld.q r0, (52*8), r52
- ld.q r0, (53*8), r53
- ld.q r0, (54*8), r54
- ld.q r0, (55*8), r55
- ld.q r0, (56*8), r56
- ld.q r0, (57*8), r57
- ld.q r0, (58*8), r58
- ld.q r0, (59*8), r59
-
- ! epilogue
- ld.l r15, 0, r18
- ld.l r15, 4, r14
- ptabs r18, tr0
- movi FRAME_SIZE, r0
- add r15, r0, r15
- blink tr0, r63
-__sh64_switch_to_end:
-.LFE1:
- .size sh64_switch_to,.LFE1-sh64_switch_to
-
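The st.l/ld.l comments above pin down the thread fields the context switch relies on: sp at offset 0, pc at offset 4 and kregs at offset 8 of the thread structure, plus thread_info at offset 4 of the task_struct (loaded into KCR0 so that "current" follows the switch). A hypothetical paraphrase of those offsets, not the kernel's actual definitions:

/* Hypothetical view of the fields sh64_switch_to touches. */
struct sh5_thread_sketch {
	unsigned int sp;	/* +0: saved kernel stack pointer */
	unsigned int pc;	/* +4: resume address (.Lsave_pc, or the initial pc of a new thread) */
	unsigned int kregs;	/* +8: pt_regs frame built above for the unwinder */
};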
diff --git a/arch/sh/kernel/cpu/sh5/unwind.c b/arch/sh/kernel/cpu/sh5/unwind.c
deleted file mode 100644
index 3cb0cd9cea29..000000000000
--- a/arch/sh/kernel/cpu/sh5/unwind.c
+++ /dev/null
@@ -1,342 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/sh/kernel/cpu/sh5/unwind.c
- *
- * Copyright (C) 2004 Paul Mundt
- * Copyright (C) 2004 Richard Curnow
- */
-#include <linux/kallsyms.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <asm/page.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/io.h>
-#include <asm/unwinder.h>
-#include <asm/stacktrace.h>
-
-static u8 regcache[63];
-
-/*
- * Finding the previous stack frame isn't as straightforward as it is
- * on some other platforms. In the sh64 case, we don't have "linked" stack
- * frames, so we need to do a bit of work to determine the previous frame,
- * and in turn, the previous r14/r18 pair.
- *
- * There are generally a few cases which determine where we can find out
- * the r14/r18 values. In the general case, this can be determined by poking
- * around the prologue of the symbol PC is in (note that we absolutely must
- * have frame pointer support as well as the kernel symbol table mapped,
- * otherwise we can't even get this far).
- *
- * In other cases, such as the interrupt/exception path, we can poke around
- * the sp/fp.
- *
- * Notably, this entire approach is somewhat error prone, and in the event
- * that the previous frame cannot be determined, that's all we can do.
- * Either way, this still leaves us with a more correct backtrace than what
- * we would be able to come up with by walking the stack (which is garbage
- * for anything beyond the first frame).
- * -- PFM.
- */
-static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
- unsigned long *pprev_fp, unsigned long *pprev_pc,
- struct pt_regs *regs)
-{
- const char *sym;
- char namebuf[128];
- unsigned long offset;
- unsigned long prologue = 0;
- unsigned long fp_displacement = 0;
- unsigned long fp_prev = 0;
- unsigned long offset_r14 = 0, offset_r18 = 0;
- int i, found_prologue_end = 0;
-
- sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
- if (!sym)
- return -EINVAL;
-
- prologue = pc - offset;
- if (!prologue)
- return -EINVAL;
-
- /* Validate fp, to avoid risk of dereferencing a bad pointer later.
-	   Assume 128MB since that's the amount of RAM on a Cayman. Modify
- when there is an SH-5 board with more. */
- if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
- (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
- ((fp & 7) != 0)) {
- return -EINVAL;
- }
-
- /*
-	 * Depth to walk; the value is completely arbitrary.
- */
- for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
- unsigned long op;
- u8 major, minor;
- u8 src, dest, disp;
-
- op = *(unsigned long *)prologue;
-
- major = (op >> 26) & 0x3f;
- src = (op >> 20) & 0x3f;
- minor = (op >> 16) & 0xf;
- disp = (op >> 10) & 0x3f;
- dest = (op >> 4) & 0x3f;
-
- /*
- * Stack frame creation happens in a number of ways.. in the
- * general case when the stack frame is less than 511 bytes,
- * it's generally created by an addi or addi.l:
- *
- * addi/addi.l r15, -FRAME_SIZE, r15
- *
- * in the event that the frame size is bigger than this, it's
- * typically created using a movi/sub pair as follows:
- *
- * movi FRAME_SIZE, rX
- * sub r15, rX, r15
- */
-
- switch (major) {
- case (0x00 >> 2):
- switch (minor) {
- case 0x8: /* add.l */
- case 0x9: /* add */
- /* Look for r15, r63, r14 */
- if (src == 15 && disp == 63 && dest == 14)
- found_prologue_end = 1;
-
- break;
- case 0xa: /* sub.l */
- case 0xb: /* sub */
- if (src != 15 || dest != 15)
- continue;
-
- fp_displacement -= regcache[disp];
- fp_prev = fp - fp_displacement;
- break;
- }
- break;
- case (0xa8 >> 2): /* st.l */
- if (src != 15)
- continue;
-
- switch (dest) {
- case 14:
- if (offset_r14 || fp_displacement == 0)
- continue;
-
- offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
- offset_r14 *= sizeof(unsigned long);
- offset_r14 += fp_displacement;
- break;
- case 18:
- if (offset_r18 || fp_displacement == 0)
- continue;
-
- offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
- offset_r18 *= sizeof(unsigned long);
- offset_r18 += fp_displacement;
- break;
- }
-
- break;
- case (0xcc >> 2): /* movi */
- if (dest >= 63) {
- printk(KERN_NOTICE "%s: Invalid dest reg %d "
- "specified in movi handler. Failed "
- "opcode was 0x%lx: ", __func__,
- dest, op);
-
- continue;
- }
-
- /* Sign extend */
- regcache[dest] =
- sign_extend64((((u64)op >> 10) & 0xffff), 9);
- break;
- case (0xd0 >> 2): /* addi */
- case (0xd4 >> 2): /* addi.l */
- /* Look for r15, -FRAME_SIZE, r15 */
- if (src != 15 || dest != 15)
- continue;
-
- /* Sign extended frame size.. */
- fp_displacement +=
- (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
- fp_prev = fp - fp_displacement;
- break;
- }
-
- if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
- break;
- }
-
- if (offset_r14 == 0 || fp_prev == 0) {
- if (!offset_r14)
- pr_debug("Unable to find r14 offset\n");
- if (!fp_prev)
- pr_debug("Unable to find previous fp\n");
-
- return -EINVAL;
- }
-
-	/* For the innermost leaf function, there might not be an offset_r18 */
- if (!*pprev_pc && (offset_r18 == 0))
- return -EINVAL;
-
- *pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
-
- if (offset_r18)
- *pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
-
- *pprev_pc &= ~1;
-
- return 0;
-}
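The repeated (((s64)op >> 10) & 0x3ff) << 54 >> 54 expression above is sign extension of the 10-bit displacement field at bits 10..19, the same thing the movi handler spells as sign_extend64(..., 9). A stand-alone equivalent with a worked value:

#include <stdint.h>
#include <stdio.h>

/* Sign-extend the 10-bit field at bits 10..19 of an opcode word. */
static int64_t sext10(uint64_t op)
{
	int64_t v = (op >> 10) & 0x3ff;
	return (v & 0x200) ? v - 0x400 : v;	/* extend from bit 9 */
}

int main(void)
{
	uint64_t op = (uint64_t)0x3f8 << 10;		/* displacement field = 0x3f8 */
	printf("%lld\n", (long long)sext10(op));	/* prints -8, as in "addi r15, -8, r15" */
	return 0;
}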
-
-/*
- * Don't put this on the stack since we'll want to call in to
- * sh64_unwinder_dump() when we're close to underflowing the stack
- * anyway.
- */
-static struct pt_regs here_regs;
-
-extern const char syscall_ret;
-extern const char ret_from_syscall;
-extern const char ret_from_exception;
-extern const char ret_from_irq;
-
-static void sh64_unwind_inner(const struct stacktrace_ops *ops,
- void *data, struct pt_regs *regs);
-
-static inline void unwind_nested(const struct stacktrace_ops *ops, void *data,
- unsigned long pc, unsigned long fp)
-{
- if ((fp >= __MEMORY_START) &&
- ((fp & 7) == 0))
- sh64_unwind_inner(ops, data, (struct pt_regs *)fp);
-}
-
-static void sh64_unwind_inner(const struct stacktrace_ops *ops,
- void *data, struct pt_regs *regs)
-{
- unsigned long pc, fp;
- int ofs = 0;
- int first_pass;
-
- pc = regs->pc & ~1;
- fp = regs->regs[14];
-
- first_pass = 1;
- for (;;) {
- int cond;
- unsigned long next_fp, next_pc;
-
- if (pc == ((unsigned long)&syscall_ret & ~1)) {
- printk("SYSCALL\n");
- unwind_nested(ops, data, pc, fp);
- return;
- }
-
- if (pc == ((unsigned long)&ret_from_syscall & ~1)) {
- printk("SYSCALL (PREEMPTED)\n");
- unwind_nested(ops, data, pc, fp);
- return;
- }
-
- /* In this case, the PC is discovered by lookup_prev_stack_frame but
- it has 4 taken off it to look like the 'caller' */
- if (pc == ((unsigned long)&ret_from_exception & ~1)) {
- printk("EXCEPTION\n");
- unwind_nested(ops, data, pc, fp);
- return;
- }
-
- if (pc == ((unsigned long)&ret_from_irq & ~1)) {
- printk("IRQ\n");
- unwind_nested(ops, data, pc, fp);
- return;
- }
-
- cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
- ((pc & 3) == 0) && ((fp & 7) == 0));
-
- pc -= ofs;
-
- ops->address(data, pc, 1);
-
- if (first_pass) {
- /* If the innermost frame is a leaf function, it's
- * possible that r18 is never saved out to the stack.
- */
- next_pc = regs->regs[18];
- } else {
- next_pc = 0;
- }
-
- if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
- ofs = sizeof(unsigned long);
- pc = next_pc & ~1;
- fp = next_fp;
- } else {
- printk("Unable to lookup previous stack frame\n");
- break;
- }
- first_pass = 0;
- }
-
- printk("\n");
-}
-
-static void sh64_unwinder_dump(struct task_struct *task,
- struct pt_regs *regs,
- unsigned long *sp,
- const struct stacktrace_ops *ops,
- void *data)
-{
- if (!regs) {
- /*
- * Fetch current regs if we have no other saved state to back
- * trace from.
- */
- regs = &here_regs;
-
- __asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
- __asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
- __asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
-
- __asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
- __asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
- __asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
- __asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
- __asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
- __asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
- __asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
- __asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
-
- __asm__ __volatile__ (
- "pta 0f, tr0\n\t"
- "blink tr0, %0\n\t"
- "0: nop"
- : "=r" (regs->pc)
- );
- }
-
- sh64_unwind_inner(ops, data, regs);
-}
-
-static struct unwinder sh64_unwinder = {
- .name = "sh64-unwinder",
- .dump = sh64_unwinder_dump,
- .rating = 150,
-};
-
-static int __init sh64_unwinder_init(void)
-{
- return unwinder_register(&sh64_unwinder);
-}
-early_initcall(sh64_unwinder_init);
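sh64_unwind_inner() reports each frame through ops->address(data, pc, 1). A minimal sketch of a compatible callback, with the signature inferred from that call site rather than quoted from the stacktrace headers:

/* Prints every address the unwinder reports; the third argument is the
 * "reliable" flag passed as 1 above. Sketch only. */
static void sketch_print_address(void *data, unsigned long addr, int reliable)
{
	printk("%s[<%08lx>] %pS\n", reliable ? "" : "? ", addr, (void *)addr);
}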