From c601a51af10f714292f42eab45fa8c9154dc1414 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Fri, 3 Jul 2009 16:16:54 +0900 Subject: sh: Use bootmem ontop of lmb Rework the bootmem allocator to use the lmb framework. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/setup.c | 71 ++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 52 insertions(+), 19 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index dd38338553ef..ceb409bf7741 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -233,39 +234,45 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn, void __init setup_bootmem_allocator(unsigned long free_pfn) { unsigned long bootmap_size; + unsigned long bootmap_pages, bootmem_paddr; + u64 total_pages = (lmb_end_of_DRAM() - __MEMORY_START) >> PAGE_SHIFT; + int i; + + bootmap_pages = bootmem_bootmap_pages(total_pages); + + bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE); /* * Find a proper area for the bootmem bitmap. After this * bootstrap step all allocations (until the page allocator * is intact) must be done via bootmem_alloc(). */ - bootmap_size = init_bootmem_node(NODE_DATA(0), free_pfn, + bootmap_size = init_bootmem_node(NODE_DATA(0), + bootmem_paddr >> PAGE_SHIFT, min_low_pfn, max_low_pfn); - __add_active_range(0, min_low_pfn, max_low_pfn); - register_bootmem_low_pages(); - - node_set_online(0); + /* Add active regions with valid PFNs. */ + for (i = 0; i < lmb.memory.cnt; i++) { + unsigned long start_pfn, end_pfn; + start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; + end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); + __add_active_range(0, start_pfn, end_pfn); + } /* - * Reserve the kernel text and - * Reserve the bootmem bitmap. We do this in two steps (first step - * was init_bootmem()), because this catches the (definitely buggy) - * case of us accidentally initializing the bootmem allocator with - * an invalid RAM area. + * Add all physical memory to the bootmem map and mark each + * area as present. */ - reserve_bootmem(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET, - (PFN_PHYS(free_pfn) + bootmap_size + PAGE_SIZE - 1) - - (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET), - BOOTMEM_DEFAULT); + register_bootmem_low_pages(); - /* - * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET. - */ - if (CONFIG_ZERO_PAGE_OFFSET != 0) - reserve_bootmem(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET, + /* Reserve the sections we're already using. */ + for (i = 0; i < lmb.reserved.cnt; i++) + reserve_bootmem(lmb.reserved.region[i].base, + lmb_size_bytes(&lmb.reserved, i), BOOTMEM_DEFAULT); + node_set_online(0); + sparse_memory_present_with_active_regions(0); #ifdef CONFIG_BLK_DEV_INITRD @@ -296,12 +303,37 @@ void __init setup_bootmem_allocator(unsigned long free_pfn) static void __init setup_memory(void) { unsigned long start_pfn; + u64 base = min_low_pfn << PAGE_SHIFT; + u64 size = (max_low_pfn << PAGE_SHIFT) - base; /* * Partially used pages are not usable - thus * we are rounding upwards: */ start_pfn = PFN_UP(__pa(_end)); + + lmb_add(base, size); + + /* + * Reserve the kernel text and + * Reserve the bootmem bitmap. We do this in two steps (first step + * was init_bootmem()), because this catches the (definitely buggy) + * case of us accidentally initializing the bootmem allocator with + * an invalid RAM area. 
+ */ + lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET, + (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - + (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET)); + + /* + * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET. + */ + if (CONFIG_ZERO_PAGE_OFFSET != 0) + lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET); + + lmb_analyze(); + lmb_dump_all(); + setup_bootmem_allocator(start_pfn); } #else @@ -402,6 +434,7 @@ void __init setup_arch(char **cmdline_p) nodes_clear(node_online_map); /* Setup bootmem with available RAM */ + lmb_init(); setup_memory(); sparse_init(); -- cgit v1.2.3-59-g8ed1b From 79714acbab080ad351acf4bba9a2bbc21d65c93c Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 3 Jul 2009 10:08:05 +0000 Subject: sh: hwblk base implementation This patch is the hwblk base implementation, containing structures and shared functions dealing with hardware blocks. A each processor model should provide a list of hwblks and describe which module stop bit that is associated with each hwblck and how the hwblks are grouped together into areas. The shared code keeps track of the usage count for each hwblk and the areas. Fallback implementations for processor specific code are also kept as weak symbols. The clock framework, the runtime pm code and cpuidle will all tie into this hwblk implementation. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/hwblk.h | 61 +++++++++++++++++++++ arch/sh/kernel/cpu/Makefile | 2 +- arch/sh/kernel/cpu/hwblk.c | 130 ++++++++++++++++++++++++++++++++++++++++++++ arch/sh/kernel/time.c | 2 + 4 files changed, 194 insertions(+), 1 deletion(-) create mode 100644 arch/sh/include/asm/hwblk.h create mode 100644 arch/sh/kernel/cpu/hwblk.c (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/hwblk.h b/arch/sh/include/asm/hwblk.h new file mode 100644 index 000000000000..51a46f496639 --- /dev/null +++ b/arch/sh/include/asm/hwblk.h @@ -0,0 +1,61 @@ +#ifndef __ASM_SH_HWBLK_H +#define __ASM_SH_HWBLK_H + +#include +#include + +#define HWBLK_AREA_FLAG_PARENT (1 << 0) /* valid parent */ + +#define HWBLK_AREA(_flags, _parent) \ +{ \ + .flags = _flags, \ + .parent = _parent, \ +} + +struct hwblk_area { + unsigned long cnt; + unsigned char parent; + unsigned char flags; +}; + +#define HWBLK(_mstp, _bit, _area) \ +{ \ + .mstp = (void __iomem *)_mstp, \ + .bit = _bit, \ + .area = _area, \ +} + +struct hwblk { + void __iomem *mstp; + unsigned char bit; + unsigned char area; + unsigned long cnt; +}; + +struct hwblk_info { + struct hwblk_area *areas; + int nr_areas; + struct hwblk *hwblks; + int nr_hwblks; +}; + +/* Should be defined by processor-specific code */ +int arch_hwblk_init(void); +int arch_hwblk_sleep_mode(void); + +int hwblk_register(struct hwblk_info *info); +int hwblk_init(void); + +/* allow clocks to enable and disable hardware blocks */ +#define SH_HWBLK_CLK(_name, _id, _parent, _hwblk, _flags) \ +{ \ + .name = _name, \ + .id = _id, \ + .parent = _parent, \ + .arch_flags = _hwblk, \ + .flags = _flags, \ +} + +int sh_hwblk_clk_register(struct clk *clks, int nr); + +#endif /* __ASM_SH_HWBLK_H */ diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile index eecad7cbd61e..3d6b9312dc47 100644 --- a/arch/sh/kernel/cpu/Makefile +++ b/arch/sh/kernel/cpu/Makefile @@ -19,4 +19,4 @@ obj-$(CONFIG_UBC_WAKEUP) += ubc.o obj-$(CONFIG_SH_ADC) += adc.o obj-$(CONFIG_SH_CLK_CPG) += clock-cpg.o -obj-y += irq/ init.o clock.o +obj-y += irq/ init.o clock.o hwblk.o diff --git a/arch/sh/kernel/cpu/hwblk.c b/arch/sh/kernel/cpu/hwblk.c new file mode 
100644 index 000000000000..7c3a73deff24 --- /dev/null +++ b/arch/sh/kernel/cpu/hwblk.c @@ -0,0 +1,130 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +static DEFINE_SPINLOCK(hwblk_lock); + +static void hwblk_area_inc(struct hwblk_info *info, int area) +{ + struct hwblk_area *hap = info->areas + area; + + hap->cnt++; + if (hap->cnt == 1) + if (hap->flags & HWBLK_AREA_FLAG_PARENT) + hwblk_area_inc(info, hap->parent); +} + +static void hwblk_area_dec(struct hwblk_info *info, int area) +{ + struct hwblk_area *hap = info->areas + area; + + if (hap->cnt == 1) + if (hap->flags & HWBLK_AREA_FLAG_PARENT) + hwblk_area_dec(info, hap->parent); + hap->cnt--; +} + +static void hwblk_enable(struct hwblk_info *info, int hwblk) +{ + struct hwblk *hp = info->hwblks + hwblk; + unsigned long tmp; + unsigned long flags; + + spin_lock_irqsave(&hwblk_lock, flags); + + hp->cnt++; + if (hp->cnt == 1) { + hwblk_area_inc(info, hp->area); + + tmp = __raw_readl(hp->mstp); + tmp &= ~(1 << hp->bit); + __raw_writel(tmp, hp->mstp); + } + + spin_unlock_irqrestore(&hwblk_lock, flags); +} + +static void hwblk_disable(struct hwblk_info *info, int hwblk) +{ + struct hwblk *hp = info->hwblks + hwblk; + unsigned long tmp; + unsigned long flags; + + spin_lock_irqsave(&hwblk_lock, flags); + + if (hp->cnt == 1) { + hwblk_area_dec(info, hp->area); + + tmp = __raw_readl(hp->mstp); + tmp |= 1 << hp->bit; + __raw_writel(tmp, hp->mstp); + } + hp->cnt--; + + spin_unlock_irqrestore(&hwblk_lock, flags); +} + +static struct hwblk_info *hwblk_info; + +int __init hwblk_register(struct hwblk_info *info) +{ + hwblk_info = info; + return 0; +} + +int __init __weak arch_hwblk_init(void) +{ + return 0; +} + +int __weak arch_hwblk_sleep_mode(void) +{ + return SUSP_SH_SLEEP; +} + +int __init hwblk_init(void) +{ + return arch_hwblk_init(); +} + +/* allow clocks to enable and disable hardware blocks */ +static int sh_hwblk_clk_enable(struct clk *clk) +{ + if (!hwblk_info) + return -ENOENT; + + hwblk_enable(hwblk_info, clk->arch_flags); + return 0; +} + +static void sh_hwblk_clk_disable(struct clk *clk) +{ + if (hwblk_info) + hwblk_disable(hwblk_info, clk->arch_flags); +} + +static struct clk_ops sh_hwblk_clk_ops = { + .enable = sh_hwblk_clk_enable, + .disable = sh_hwblk_clk_disable, + .recalc = followparent_recalc, +}; + +int __init sh_hwblk_clk_register(struct clk *clks, int nr) +{ + struct clk *clkp; + int ret = 0; + int k; + + for (k = 0; !ret && (k < nr); k++) { + clkp = clks + k; + clkp->ops = &sh_hwblk_clk_ops; + ret |= clk_register(clkp); + } + + return ret; +} diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c index 9b352a1e3fb4..d2424b068b7b 100644 --- a/arch/sh/kernel/time.c +++ b/arch/sh/kernel/time.c @@ -21,6 +21,7 @@ #include #include #include +#include #include /* Dummy RTC ops */ @@ -96,6 +97,7 @@ void __init time_init(void) if (board_time_init) board_time_init(); + hwblk_init(); clk_init(); rtc_sh_get_time(&xtime); -- cgit v1.2.3-59-g8ed1b From a61c1a636628a28ab5b42a9d36582a8f6a08893a Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 3 Jul 2009 10:15:25 +0000 Subject: sh: hwblk for sh7722 This patch contains the sh7722 specific hwblk implementation. Hwblk ids are added to the processor specific header file, module stop bits and areas are kept track of as hwblks, clocks are converted to make use of the shared hwblk code. Code to determine allowed sleep modes is also added. 
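As a usage illustration, a minimal driver-side sketch (a hypothetical consumer; the function name and the clk_get() lookup by "sdhi0" are illustrative, not part of the patch): a clock declared with SH_HWBLK_CLK() carries its hwblk id in arch_flags, so an ordinary clk_enable()/clk_disable() pair now also maintains the usage count of the power area that owns the block, which is what the sleep-mode check added below relies on.

    #include <linux/err.h>
    #include <linux/clk.h>

    static int sdhi_access_example(void)
    {
            struct clk *clk = clk_get(NULL, "sdhi0");  /* HWBLK_SDHI, CORE_AREA */

            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            clk_enable(clk);   /* clears MSTPCR2 bit 18, bumps the CORE_AREA count */
            /* ... access the SDHI block ... */
            clk_disable(clk);  /* sets the stop bit again, drops the area count */
            clk_put(clk);

            return 0;
    }
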
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/cpu-sh4/cpu/sh7722.h | 14 +++++ arch/sh/kernel/cpu/sh4a/Makefile | 2 +- arch/sh/kernel/cpu/sh4a/clock-sh7722.c | 60 ++++++++++--------- arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c | 106 +++++++++++++++++++++++++++++++++ 4 files changed, 153 insertions(+), 29 deletions(-) create mode 100644 arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/cpu-sh4/cpu/sh7722.h b/arch/sh/include/cpu-sh4/cpu/sh7722.h index 738ea43c5038..48560407cbe1 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7722.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7722.h @@ -221,4 +221,18 @@ enum { GPIO_FN_KEYOUT3, GPIO_FN_KEYOUT4_IN6, GPIO_FN_KEYOUT5_IN5, }; +enum { + HWBLK_UNKNOWN = 0, + HWBLK_TLB, HWBLK_IC, HWBLK_OC, HWBLK_URAM, HWBLK_XYMEM, + HWBLK_INTC, HWBLK_DMAC, HWBLK_SHYWAY, HWBLK_HUDI, + HWBLK_UBC, HWBLK_TMU, HWBLK_CMT, HWBLK_RWDT, HWBLK_FLCTL, + HWBLK_SCIF0, HWBLK_SCIF1, HWBLK_SCIF2, HWBLK_SIO, + HWBLK_SIOF0, HWBLK_SIOF1, HWBLK_IIC, HWBLK_RTC, + HWBLK_TPU, HWBLK_IRDA, HWBLK_SDHI, HWBLK_SIM, HWBLK_KEYSC, + HWBLK_TSIF, HWBLK_USBF, HWBLK_2DG, HWBLK_SIU, HWBLK_VOU, + HWBLK_JPU, HWBLK_BEU, HWBLK_CEU, HWBLK_VEU, HWBLK_VPU, + HWBLK_LCDC, + HWBLK_NR, +}; + #endif /* __ASM_SH7722_H__ */ diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index ebdd391d5f42..3cafda696378 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile @@ -25,7 +25,7 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o clock-$(CONFIG_CPU_SUBTYPE_SH7786) := clock-sh7786.o clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o -clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o +clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o hwblk-sh7722.o clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7366.o diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c index 40f859354f79..1fa9e1dd1cc8 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include /* SH7722 registers */ #define FRQCR 0xa4150000 @@ -140,35 +142,37 @@ struct clk div6_clks[] = { SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0), }; -#define MSTP(_str, _parent, _reg, _bit, _flags) \ - SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _flags) +#define R_CLK &r_clk +#define P_CLK &div4_clks[DIV4_P] +#define B_CLK &div4_clks[DIV4_B] +#define U_CLK &div4_clks[DIV4_U] static struct clk mstp_clks[] = { - MSTP("uram0", &div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT), - MSTP("xymem0", &div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT), - MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0), - MSTP("cmt0", &r_clk, MSTPCR0, 14, 0), - MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0), - MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0), - MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 7, 0), - MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 6, 0), - MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 5, 0), - - MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0), - MSTP("rtc0", &r_clk, MSTPCR1, 8, 0), - - MSTP("sdhi0", &div4_clks[DIV4_P], MSTPCR2, 18, 0), - MSTP("keysc0", &r_clk, MSTPCR2, 14, 0), - MSTP("usbf0", &div4_clks[DIV4_P], MSTPCR2, 11, 0), - MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 9, 0), - MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 8, 0), - MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 
5, 0), - MSTP("jpu0", &div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT), - MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0), - MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0), - MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT), - MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT), - MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0), + SH_HWBLK_CLK("uram0", -1, U_CLK, HWBLK_URAM, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("xymem0", -1, B_CLK, HWBLK_XYMEM, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU, 0), + SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0), + SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0), + SH_HWBLK_CLK("flctl0", -1, P_CLK, HWBLK_FLCTL, 0), + SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0), + SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0), + SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0), + + SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC, 0), + SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0), + + SH_HWBLK_CLK("sdhi0", -1, P_CLK, HWBLK_SDHI, 0), + SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0), + SH_HWBLK_CLK("usbf0", -1, P_CLK, HWBLK_USBF, 0), + SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0), + SH_HWBLK_CLK("siu0", -1, B_CLK, HWBLK_SIU, 0), + SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0), + SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU, 0), + SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU, 0), + SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("lcdc0", -1, P_CLK, HWBLK_LCDC, 0), }; int __init arch_clk_init(void) @@ -191,7 +195,7 @@ int __init arch_clk_init(void) ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); if (!ret) - ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks)); + ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks)); return ret; } diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c new file mode 100644 index 000000000000..00a1c02d82b1 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c @@ -0,0 +1,106 @@ +/* + * arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c + * + * SH7722 hardware block support + * + * Copyright (C) 2009 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include +#include +#include +#include +#include + +/* SH7722 registers */ +#define MSTPCR0 0xa4150030 +#define MSTPCR1 0xa4150034 +#define MSTPCR2 0xa4150038 + +/* SH7722 Power Domains */ +enum { CORE_AREA, SUB_AREA, CORE_AREA_BM }; +static struct hwblk_area sh7722_hwblk_area[] = { + [CORE_AREA] = HWBLK_AREA(0, 0), + [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA), + [SUB_AREA] = HWBLK_AREA(0, 0), +}; + +/* Table mapping HWBLK to Module Stop Bit and Power Domain */ +static struct hwblk sh7722_hwblk[HWBLK_NR] = { + [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA), + [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA), + [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA), + [HWBLK_URAM] = HWBLK(MSTPCR0, 28, CORE_AREA), + [HWBLK_XYMEM] = HWBLK(MSTPCR0, 26, CORE_AREA), + [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA), + [HWBLK_DMAC] = HWBLK(MSTPCR0, 21, CORE_AREA_BM), + [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA), + [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA), + [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA), + [HWBLK_TMU] = HWBLK(MSTPCR0, 15, CORE_AREA), + [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA), + [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA), + [HWBLK_FLCTL] = HWBLK(MSTPCR0, 10, CORE_AREA), + [HWBLK_SCIF0] = HWBLK(MSTPCR0, 7, CORE_AREA), + [HWBLK_SCIF1] = HWBLK(MSTPCR0, 6, CORE_AREA), + [HWBLK_SCIF2] = HWBLK(MSTPCR0, 5, CORE_AREA), + [HWBLK_SIO] = HWBLK(MSTPCR0, 3, CORE_AREA), + [HWBLK_SIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA), + [HWBLK_SIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA), + + [HWBLK_IIC] = HWBLK(MSTPCR1, 9, CORE_AREA), + [HWBLK_RTC] = HWBLK(MSTPCR1, 8, SUB_AREA), + + [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA), + [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA), + [HWBLK_SDHI] = HWBLK(MSTPCR2, 18, CORE_AREA), + [HWBLK_SIM] = HWBLK(MSTPCR2, 16, CORE_AREA), + [HWBLK_KEYSC] = HWBLK(MSTPCR2, 14, SUB_AREA), + [HWBLK_TSIF] = HWBLK(MSTPCR2, 13, SUB_AREA), + [HWBLK_USBF] = HWBLK(MSTPCR2, 11, CORE_AREA), + [HWBLK_2DG] = HWBLK(MSTPCR2, 9, CORE_AREA_BM), + [HWBLK_SIU] = HWBLK(MSTPCR2, 8, CORE_AREA), + [HWBLK_JPU] = HWBLK(MSTPCR2, 6, CORE_AREA_BM), + [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM), + [HWBLK_BEU] = HWBLK(MSTPCR2, 4, CORE_AREA_BM), + [HWBLK_CEU] = HWBLK(MSTPCR2, 3, CORE_AREA_BM), + [HWBLK_VEU] = HWBLK(MSTPCR2, 2, CORE_AREA_BM), + [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM), + [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM), +}; + +static struct hwblk_info sh7722_hwblk_info = { + .areas = sh7722_hwblk_area, + .nr_areas = ARRAY_SIZE(sh7722_hwblk_area), + .hwblks = sh7722_hwblk, + .nr_hwblks = ARRAY_SIZE(sh7722_hwblk), +}; + +int arch_hwblk_sleep_mode(void) +{ + if (!sh7722_hwblk_area[CORE_AREA].cnt) + return SUSP_SH_STANDBY | SUSP_SH_SF; + + if (!sh7722_hwblk_area[CORE_AREA_BM].cnt) + return SUSP_SH_SLEEP | SUSP_SH_SF; + + return SUSP_SH_SLEEP; +} + +int __init arch_hwblk_init(void) +{ + return hwblk_register(&sh7722_hwblk_info); +} -- cgit v1.2.3-59-g8ed1b From 7426394f20c2e74b7c560bcd266cec1b327a269b Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 3 Jul 2009 10:28:00 +0000 Subject: sh: cpuidle for SuperH Mobile using hwblk This patch adds cpuidle support for SuperH Mobile. The sleep mode selected by cpuidle is compared with the mode selected by the hwblk sleep code and the best allowed mode is entered. At this point "Sleep mode" and "Sleep mode + SF" are supported. 
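As a worked illustration of how the two selections are combined in the cpuidle_sleep_enter() routine added below (the numbers are hypothetical): both the governor's request and the hwblk verdict are expressed as indices into cpuidle_mode[], and the shallower of the two wins.

    /* cpuidle governor asked for C1: sleep mode + self refresh */
    int requested_state = 1;
    /* an active hwblk only permits plain sleep mode, i.e. C0 */
    int allowed_state = 0;     /* derived from arch_hwblk_sleep_mode() */

    int k = min_t(int, allowed_state, requested_state);   /* k == 0 */
    sh_mobile_call_standby(cpuidle_mode[k]);               /* enters plain sleep */
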
This code can easily be extended to support "Software suspend mode", but the assembly code must first be updated to avoid loosing interrupts. Also, update the code to only copy the assembly snippet into internal memory once at bootup. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/suspend.h | 9 +++ arch/sh/kernel/cpu/shmobile/Makefile | 1 + arch/sh/kernel/cpu/shmobile/cpuidle.c | 102 ++++++++++++++++++++++++++++++++++ arch/sh/kernel/cpu/shmobile/pm.c | 26 ++++----- 4 files changed, 125 insertions(+), 13 deletions(-) create mode 100644 arch/sh/kernel/cpu/shmobile/cpuidle.c (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h index b1b995370e79..5c8ea28ff7a4 100644 --- a/arch/sh/include/asm/suspend.h +++ b/arch/sh/include/asm/suspend.h @@ -10,6 +10,15 @@ struct swsusp_arch_regs { struct pt_regs user_regs; unsigned long bank1_regs[8]; }; + +void sh_mobile_call_standby(unsigned long mode); + +#ifdef CONFIG_CPU_IDLE +void sh_mobile_setup_cpuidle(void); +#else +static inline void sh_mobile_setup_cpuidle(void) {} +#endif + #endif /* flags passed to assembly suspend code */ diff --git a/arch/sh/kernel/cpu/shmobile/Makefile b/arch/sh/kernel/cpu/shmobile/Makefile index 08bfa7c7db29..e8a5111e848a 100644 --- a/arch/sh/kernel/cpu/shmobile/Makefile +++ b/arch/sh/kernel/cpu/shmobile/Makefile @@ -4,3 +4,4 @@ # Power Management & Sleep mode obj-$(CONFIG_PM) += pm.o sleep.o +obj-$(CONFIG_CPU_IDLE) += cpuidle.o diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c new file mode 100644 index 000000000000..4afdd975cc66 --- /dev/null +++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c @@ -0,0 +1,102 @@ +/* + * arch/sh/kernel/cpu/shmobile/cpuidle.c + * + * Cpuidle support code for SuperH Mobile + * + * Copyright (C) 2009 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +static unsigned long cpuidle_mode[] = { + SUSP_SH_SLEEP, /* regular sleep mode */ + SUSP_SH_SLEEP | SUSP_SH_SF, /* sleep mode + self refresh */ +}; + +static int cpuidle_sleep_enter(struct cpuidle_device *dev, + struct cpuidle_state *state) +{ + unsigned long allowed_mode = arch_hwblk_sleep_mode(); + ktime_t before, after; + int requested_state = state - &dev->states[0]; + int allowed_state; + int k; + + /* convert allowed mode to allowed state */ + for (k = ARRAY_SIZE(cpuidle_mode) - 1; k > 0; k--) + if (cpuidle_mode[k] == allowed_mode) + break; + + allowed_state = k; + + /* take the following into account for sleep mode selection: + * - allowed_state: best mode allowed by hardware (clock deps) + * - requested_state: best mode allowed by software (latencies) + */ + k = min_t(int, allowed_state, requested_state); + + dev->last_state = &dev->states[k]; + before = ktime_get(); + sh_mobile_call_standby(cpuidle_mode[k]); + after = ktime_get(); + return ktime_to_ns(ktime_sub(after, before)) >> 10; +} + +static struct cpuidle_device cpuidle_dev; +static struct cpuidle_driver cpuidle_driver = { + .name = "sh_idle", + .owner = THIS_MODULE, +}; + +void sh_mobile_setup_cpuidle(void) +{ + struct cpuidle_device *dev = &cpuidle_dev; + struct cpuidle_state *state; + int i; + + cpuidle_register_driver(&cpuidle_driver); + + for (i = 0; i < CPUIDLE_STATE_MAX; i++) { + dev->states[i].name[0] = '\0'; + dev->states[i].desc[0] = '\0'; + } + + i = CPUIDLE_DRIVER_STATE_START; + + state = &dev->states[i++]; + snprintf(state->name, CPUIDLE_NAME_LEN, "C0"); + strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN); + state->exit_latency = 1; + state->target_residency = 1 * 2; + state->power_usage = 3; + state->flags = 0; + state->flags |= CPUIDLE_FLAG_SHALLOW; + state->flags |= CPUIDLE_FLAG_TIME_VALID; + state->enter = cpuidle_sleep_enter; + + dev->safe_state = state; + + state = &dev->states[i++]; + snprintf(state->name, CPUIDLE_NAME_LEN, "C1"); + strncpy(state->desc, "SuperH Sleep Mode [SF]", CPUIDLE_DESC_LEN); + state->exit_latency = 100; + state->target_residency = 1 * 2; + state->power_usage = 1; + state->flags = 0; + state->flags |= CPUIDLE_FLAG_TIME_VALID; + state->enter = cpuidle_sleep_enter; + + dev->state_count = i; + + cpuidle_register_device(dev); +} diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c index 8c067adf6830..de078d24ce56 100644 --- a/arch/sh/kernel/cpu/shmobile/pm.c +++ b/arch/sh/kernel/cpu/shmobile/pm.c @@ -1,5 +1,5 @@ /* - * arch/sh/kernel/cpu/sh4a/pm-sh_mobile.c + * arch/sh/kernel/cpu/shmobile/pm.c * * Power management support code for SuperH Mobile * @@ -32,20 +32,17 @@ * * R-standby mode is unsupported, but will be added in the future * U-standby mode is low priority since it needs bootloader hacks - * - * All modes should be tied in with cpuidle. But before that can - * happen we need to keep track of enabled hardware blocks so we - * can avoid entering sleep modes that stop clocks to hardware - * blocks that are in use even though the cpu core is idle. 
*/ +#define ILRAM_BASE 0xe5200000 + extern const unsigned char sh_mobile_standby[]; extern const unsigned int sh_mobile_standby_size; -static void sh_mobile_call_standby(unsigned long mode) +void sh_mobile_call_standby(unsigned long mode) { extern void *vbr_base; - void *onchip_mem = (void *)0xe5200000; /* ILRAM */ + void *onchip_mem = (void *)ILRAM_BASE; void (*standby_onchip_mem)(unsigned long) = onchip_mem; /* Note: Wake up from sleep may generate exceptions! @@ -55,11 +52,6 @@ static void sh_mobile_call_standby(unsigned long mode) if (mode & SUSP_SH_SF) asm volatile("ldc %0, vbr" : : "r" (onchip_mem) : "memory"); - /* Copy the assembly snippet to the otherwise ununsed ILRAM */ - memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size); - wmb(); - ctrl_barrier(); - /* Let assembly snippet in on-chip memory handle the rest */ standby_onchip_mem(mode); @@ -85,7 +77,15 @@ static struct platform_suspend_ops sh_pm_ops = { static int __init sh_pm_init(void) { + void *onchip_mem = (void *)ILRAM_BASE; + + /* Copy the assembly snippet to the otherwise ununsed ILRAM */ + memcpy(onchip_mem, sh_mobile_standby, sh_mobile_standby_size); + wmb(); + ctrl_barrier(); + suspend_set_ops(&sh_pm_ops); + sh_mobile_setup_cpuidle(); return 0; } -- cgit v1.2.3-59-g8ed1b From c652d780c9cf7f860141de232b37160fe013feca Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Mon, 6 Jul 2009 20:16:33 +0900 Subject: sh: Add ftrace syscall tracing support Now that I've added TIF_SYSCALL_FTRACE the thread flags do not fit into a single byte any more. Code testing them now needs to be aware of the upper and lower bytes. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 1 + arch/sh/include/asm/syscall_32.h | 1 + arch/sh/include/asm/thread_info.h | 11 ++++--- arch/sh/kernel/Makefile_32 | 1 + arch/sh/kernel/entry-common.S | 18 ++++++++--- arch/sh/kernel/ftrace.c | 68 +++++++++++++++++++++++++++++++++++++++ arch/sh/kernel/ptrace_32.c | 8 +++++ arch/sh/lib/mcount.S | 2 +- 8 files changed, 100 insertions(+), 10 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 801a4a79ffe3..29e41ec6537d 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -32,6 +32,7 @@ config SUPERH32 select HAVE_FTRACE_MCOUNT_RECORD select HAVE_DYNAMIC_FTRACE select HAVE_FUNCTION_TRACE_MCOUNT_TEST + select HAVE_FTRACE_SYSCALLS select HAVE_ARCH_KGDB select ARCH_HIBERNATION_POSSIBLE if MMU diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h index 6f83f2cc45c1..7d80df4f09cb 100644 --- a/arch/sh/include/asm/syscall_32.h +++ b/arch/sh/include/asm/syscall_32.h @@ -65,6 +65,7 @@ static inline void syscall_get_arguments(struct task_struct *task, case 3: args[2] = regs->regs[6]; case 2: args[1] = regs->regs[5]; case 1: args[0] = regs->regs[4]; + case 0: break; default: BUG(); diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index f09ac4806294..499e315f4be2 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -97,7 +97,7 @@ static inline struct thread_info *current_thread_info(void) extern struct thread_info *alloc_thread_info(struct task_struct *tsk); extern void free_thread_info(struct thread_info *ti); - + #endif /* THREAD_SHIFT < PAGE_SHIFT */ #endif /* __ASSEMBLY__ */ @@ -116,6 +116,7 @@ extern void free_thread_info(struct thread_info *ti); #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SECCOMP 6 /* secure computing */ #define TIF_NOTIFY_RESUME 7 /* callback before 
returning to user */ +#define TIF_SYSCALL_FTRACE 8 /* for ftrace syscall instrumentation */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 18 @@ -129,25 +130,27 @@ extern void free_thread_info(struct thread_info *ti); #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_SYSCALL_FTRACE (1 << TIF_SYSCALL_FTRACE) #define _TIF_USEDFPU (1 << TIF_USEDFPU) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_FREEZE (1 << TIF_FREEZE) /* - * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within a byte, or we + * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we * blow the tst immediate size constraints and need to fix up * arch/sh/kernel/entry-common.S. */ /* work to do in syscall trace */ #define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ - _TIF_SYSCALL_AUDIT | _TIF_SECCOMP) + _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ + _TIF_SYSCALL_FTRACE) /* work to do on any return to u-space */ #define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \ _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \ _TIF_SINGLESTEP | _TIF_RESTORE_SIGMASK | \ - _TIF_NOTIFY_RESUME) + _TIF_NOTIFY_RESUME | _TIF_SYSCALL_FTRACE) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index 9411e3e31e68..fee924a9c46c 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -29,6 +29,7 @@ obj-$(CONFIG_IO_TRAPPED) += io_trapped.o obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_GENERIC_GPIO) += gpio.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o +obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o obj-$(CONFIG_DUMP_CODE) += disassemble.o obj-$(CONFIG_HIBERNATION) += swsusp.o diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index d62359cfbbe2..d62175650c54 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -131,7 +131,7 @@ ENTRY(resume_userspace) nop #endif mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags - tst #_TIF_WORK_MASK, r0 + tst #(_TIF_WORK_MASK & 0xff), r0 bt/s __restore_all tst #_TIF_NEED_RESCHED, r0 @@ -163,7 +163,7 @@ work_resched: #endif ! mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags - tst #_TIF_WORK_MASK, r0 + tst #(_TIF_WORK_MASK & 0xff), r0 bt __restore_all bra work_pending tst #_TIF_NEED_RESCHED, r0 @@ -181,7 +181,7 @@ work_resched: syscall_exit_work: ! r0: current_thread_info->flags ! r8: current_thread_info - tst #_TIF_WORK_SYSCALL_MASK, r0 + tst #(_TIF_WORK_SYSCALL_MASK & 0xff), r0 bt/s work_pending tst #_TIF_NEED_RESCHED, r0 #ifdef CONFIG_TRACE_IRQFLAGS @@ -331,8 +331,12 @@ ENTRY(system_call) ! get_current_thread_info r8, r10 mov.l @(TI_FLAGS,r8), r8 - mov #_TIF_WORK_SYSCALL_MASK, r10 + mov #(_TIF_WORK_SYSCALL_MASK & 0xff), r10 + mov #(_TIF_WORK_SYSCALL_MASK >> 8), r9 tst r10, r8 + shll8 r9 + bf syscall_trace_entry + tst r9, r8 bf syscall_trace_entry ! mov.l 2f, r8 ! Number of syscalls @@ -359,7 +363,11 @@ syscall_exit: ! get_current_thread_info r8, r0 mov.l @(TI_FLAGS,r8), r0 ! 
current_thread_info->flags - tst #_TIF_ALLWORK_MASK, r0 + tst #(_TIF_ALLWORK_MASK & 0xff), r0 + mov #(_TIF_ALLWORK_MASK >> 8), r1 + bf syscall_exit_work + shlr8 r0 + tst r0, r1 bf syscall_exit_work bra __restore_all nop diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index 066f37dc32a9..4f62eced0aec 100644 --- a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE]; @@ -131,3 +133,69 @@ int __init ftrace_dyn_arch_init(void *data) return 0; } + +#ifdef CONFIG_FTRACE_SYSCALLS + +extern unsigned long __start_syscalls_metadata[]; +extern unsigned long __stop_syscalls_metadata[]; +extern unsigned long *sys_call_table; + +static struct syscall_metadata **syscalls_metadata; + +static struct syscall_metadata *find_syscall_meta(unsigned long *syscall) +{ + struct syscall_metadata *start; + struct syscall_metadata *stop; + char str[KSYM_SYMBOL_LEN]; + + + start = (struct syscall_metadata *)__start_syscalls_metadata; + stop = (struct syscall_metadata *)__stop_syscalls_metadata; + kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str); + + for ( ; start < stop; start++) { + if (start->name && !strcmp(start->name, str)) + return start; + } + + return NULL; +} + +#define FTRACE_SYSCALL_MAX (NR_syscalls - 1) + +struct syscall_metadata *syscall_nr_to_meta(int nr) +{ + if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0) + return NULL; + + return syscalls_metadata[nr]; +} + +void arch_init_ftrace_syscalls(void) +{ + int i; + struct syscall_metadata *meta; + unsigned long **psys_syscall_table = &sys_call_table; + static atomic_t refs; + + if (atomic_inc_return(&refs) != 1) + goto end; + + syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * + FTRACE_SYSCALL_MAX, GFP_KERNEL); + if (!syscalls_metadata) { + WARN_ON(1); + return; + } + + for (i = 0; i < FTRACE_SYSCALL_MAX; i++) { + meta = find_syscall_meta(psys_syscall_table[i]); + syscalls_metadata[i] = meta; + } + return; + + /* Paranoid: avoid overflow */ +end: + atomic_dec(&refs); +} +#endif /* CONFIG_FTRACE_SYSCALLS */ diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index 3392e835a374..c198eceaee94 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -34,6 +34,8 @@ #include #include +#include + /* * This routine will get a word off of the process kernel stack. 
*/ @@ -459,6 +461,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) */ ret = -1L; + if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) + ftrace_syscall_enter(regs); + if (unlikely(current->audit_context)) audit_syscall_entry(audit_arch(), regs->regs[3], regs->regs[4], regs->regs[5], @@ -475,6 +480,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]), regs->regs[0]); + if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) + ftrace_syscall_exit(regs); + step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, step); diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S index cb87ef580b31..71e87f9b4fda 100644 --- a/arch/sh/lib/mcount.S +++ b/arch/sh/lib/mcount.S @@ -72,7 +72,7 @@ ftrace_caller: mov.l @r0, r0 tst r0, r0 bf ftrace_stub - + MCOUNT_ENTER() .globl ftrace_call -- cgit v1.2.3-59-g8ed1b From 2802e34590f290173a3f2aa5a4d662ae5373b420 Mon Sep 17 00:00:00 2001 From: Tim Abbott Date: Thu, 9 Jul 2009 14:45:59 +0000 Subject: sh: Clean up linker script using new linker script macros. This patch converts the sh architecture to use the new linker script macros in include/asm-generic/vmlinux.lds.h. Signed-off-by: Tim Abbott Cc: Paul Mundt Cc: Sam Ravnborg Cc: linux-sh@vger.kernel.org Signed-off-by: Paul Mundt --- arch/sh/kernel/vmlinux.lds.S | 87 ++++++-------------------------------------- 1 file changed, 11 insertions(+), 76 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index f53c76acaede..674ed8feb8ae 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -50,12 +50,7 @@ SECTIONS _etext = .; /* End of text section */ } = 0x0009 - . = ALIGN(16); /* Exception table */ - __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { - __start___ex_table = .; - *(__ex_table) - __stop___ex_table = .; - } + EXCEPTION_TABLE(16) NOTES RO_DATA(PAGE_SIZE) @@ -71,69 +66,14 @@ SECTIONS __uncached_end = .; } - . = ALIGN(THREAD_SIZE); - .data : AT(ADDR(.data) - LOAD_OFFSET) { /* Data */ - *(.data.init_task) - - . = ALIGN(L1_CACHE_BYTES); - *(.data.cacheline_aligned) - - . = ALIGN(L1_CACHE_BYTES); - *(.data.read_mostly) - - . = ALIGN(PAGE_SIZE); - *(.data.page_aligned) - - __nosave_begin = .; - *(.data.nosave) - . = ALIGN(PAGE_SIZE); - __nosave_end = .; - - DATA_DATA - CONSTRUCTORS - } + RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) _edata = .; /* End of data section */ . = ALIGN(PAGE_SIZE); /* Init code and data */ - .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { - __init_begin = .; - _sinittext = .; - INIT_TEXT - _einittext = .; - } - - .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { INIT_DATA } - - . = ALIGN(16); - .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { - __setup_start = .; - *(.init.setup) - __setup_end = .; - } - - .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) { - __initcall_start = .; - INITCALLS - __initcall_end = .; - } - - .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) { - __con_initcall_start = .; - *(.con_initcall.init) - __con_initcall_end = .; - } - - SECURITY_INIT - -#ifdef CONFIG_BLK_DEV_INITRD - . = ALIGN(PAGE_SIZE); - .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { - __initramfs_start = .; - *(.init.ramfs) - __initramfs_end = .; - } -#endif + __init_begin = .; + INIT_TEXT_SECTION(PAGE_SIZE) + INIT_DATA_SECTION(16) . 
= ALIGN(4); .machvec.init : AT(ADDR(.machvec.init) - LOAD_OFFSET) { @@ -152,16 +92,11 @@ SECTIONS .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA } . = ALIGN(PAGE_SIZE); - .bss : AT(ADDR(.bss) - LOAD_OFFSET) { - __init_end = .; - __bss_start = .; /* BSS */ - *(.bss.page_aligned) - *(.bss) - *(COMMON) - . = ALIGN(4); - _ebss = .; /* uClinux MTD sucks */ - _end = . ; - } + __init_end = .; + BSS(PAGE_SIZE) + . = ALIGN(4); + _ebss = .; /* uClinux MTD sucks */ + _end = . ; /* * When something in the kernel is NOT compiled as a module, the @@ -170,7 +105,7 @@ SECTIONS * it's a module. */ /DISCARD/ : { - *(.exitcall.exit) + EXIT_CALL } STABS_DEBUG -- cgit v1.2.3-59-g8ed1b From b99610fb9cdf390965c62c22322596d961591160 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sat, 11 Jul 2009 01:00:23 +0000 Subject: sh: Provide diagnostic kernel stack checks Enable kernel stack checking code in both the dynamic ftrace and mcount code paths. Check the stack to see if it's overflowing and make sure that the stack pointer contains an address that's either in init_stack or after the bss. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/Kconfig.debug | 11 ++++++ arch/sh/kernel/asm-offsets.c | 1 + arch/sh/lib/mcount.S | 85 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 97 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index 39224b57c6ef..52a132c24aab 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -123,4 +123,15 @@ config SH64_SR_WATCH bool "Debug: set SR.WATCH to enable hardware watchpoints and trace" depends on SUPERH64 +config STACK_DEBUG + bool "Enable diagnostic checks of the kernel stack" + depends on FUNCTION_TRACER + select DEBUG_STACKOVERFLOW + default n + help + This option allows checks to be performed on the kernel stack + at runtime. Saying Y here will add overhead to every function + call and will therefore incur a major performance hit. Most + users should say N. + endmenu diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c index 99aceb28ee24..d218e808294e 100644 --- a/arch/sh/kernel/asm-offsets.c +++ b/arch/sh/kernel/asm-offsets.c @@ -26,6 +26,7 @@ int main(void) DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block)); + DEFINE(TI_SIZE, sizeof(struct thread_info)); #ifdef CONFIG_HIBERNATION DEFINE(PBE_ADDRESS, offsetof(struct pbe, address)); diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S index 71e87f9b4fda..8596483f7b41 100644 --- a/arch/sh/lib/mcount.S +++ b/arch/sh/lib/mcount.S @@ -9,6 +9,8 @@ * for more details. */ #include +#include +#include #define MCOUNT_ENTER() \ mov.l r4, @-r15; \ @@ -28,6 +30,55 @@ rts; \ mov.l @r15+, r4 +#ifdef CONFIG_STACK_DEBUG +/* + * Perform diagnostic checks on the state of the kernel stack. + * + * Check for stack overflow. If there is less than 1KB free + * then it has overflowed. + * + * Make sure the stack pointer contains a valid address. Valid + * addresses for kernel stacks are anywhere after the bss + * (after _ebss) and anywhere in init_thread_union (init_stack). + */ +#define STACK_CHECK() \ + mov #(THREAD_SIZE >> 10), r0; \ + shll8 r0; \ + shll2 r0; \ + \ + /* r1 = sp & (THREAD_SIZE - 1) */ \ + mov #-1, r1; \ + add r0, r1; \ + and r15, r1; \ + \ + mov #TI_SIZE, r3; \ + mov #(STACK_WARN >> 8), r2; \ + shll8 r2; \ + add r3, r2; \ + \ + /* Is the stack overflowing? 
*/ \ + cmp/hi r2, r1; \ + bf stack_panic; \ + \ + /* If sp > _ebss then we're OK. */ \ + mov.l .L_ebss, r1; \ + cmp/hi r1, r15; \ + bt 1f; \ + \ + /* If sp < init_stack, we're not OK. */ \ + mov.l .L_init_thread_union, r1; \ + cmp/hs r1, r15; \ + bf stack_panic; \ + \ + /* If sp > init_stack && sp < _ebss, not OK. */ \ + add r0, r1; \ + cmp/hs r1, r15; \ + bt stack_panic; \ +1: +#else +#define STACK_CHECK() +#endif /* CONFIG_STACK_DEBUG */ + .align 2 .globl _mcount .type _mcount,@function @@ -41,6 +92,8 @@ mcount: tst r0, r0 bf ftrace_stub #endif + STACK_CHECK() + MCOUNT_ENTER() #ifdef CONFIG_DYNAMIC_FTRACE @@ -73,6 +126,8 @@ ftrace_caller: tst r0, r0 bf ftrace_stub + STACK_CHECK() + MCOUNT_ENTER() .globl ftrace_call @@ -100,6 +155,36 @@ ftrace_stub: rts nop +#ifdef CONFIG_STACK_DEBUG + .globl stack_panic +stack_panic: + mov.l .Ldump_stack, r0 + jsr @r0 + nop + + mov.l .Lpanic, r0 + jsr @r0 + mov.l .Lpanic_s, r4 + + rts + nop + .align 2 .Lfunction_trace_stop: .long function_trace_stop +.L_ebss: + .long _ebss +.L_init_thread_union: + .long init_thread_union +.Lpanic: + .long panic +.Lpanic_s: + .long .Lpanic_str +.Ldump_stack: + .long dump_stack + + .section .rodata + .align 2 +.Lpanic_str: + .string "Stack error" +#endif /* CONFIG_STACK_DEBUG */ -- cgit v1.2.3-59-g8ed1b From 327933f5d6cdf083284d3c06e0370d1de464aef4 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sat, 11 Jul 2009 00:29:03 +0000 Subject: sh: Function graph tracer support Add both dynamic and static function graph tracer support for sh. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 1 + arch/sh/include/asm/ftrace.h | 3 + arch/sh/kernel/Makefile_32 | 1 + arch/sh/kernel/ftrace.c | 122 ++++++++++++++++++++++++++++++++++++++++ arch/sh/kernel/vmlinux_64.lds.S | 0 arch/sh/lib/Makefile | 1 + arch/sh/lib/mcount.S | 117 +++++++++++++++++++++++++++++++++++++- 7 files changed, 244 insertions(+), 1 deletion(-) create mode 100644 arch/sh/kernel/vmlinux_64.lds.S (limited to 'arch/sh/kernel') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 29e41ec6537d..6d110a4f7f65 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -33,6 +33,7 @@ config SUPERH32 select HAVE_DYNAMIC_FTRACE select HAVE_FUNCTION_TRACE_MCOUNT_TEST select HAVE_FTRACE_SYSCALLS + select HAVE_FUNCTION_GRAPH_TRACER select HAVE_ARCH_KGDB select ARCH_HIBERNATION_POSSIBLE if MMU diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index b09311ad1db3..7e0bcc4d4a96 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h @@ -13,8 +13,11 @@ extern void mcount(void); #ifdef CONFIG_DYNAMIC_FTRACE #define CALL_ADDR ((long)(ftrace_call)) #define STUB_ADDR ((long)(ftrace_stub)) +#define GRAPH_ADDR ((long)(ftrace_graph_call)) +#define CALLER_ADDR ((long)(ftrace_caller)) #define MCOUNT_INSN_OFFSET ((STUB_ADDR - CALL_ADDR) - 4) +#define GRAPH_INSN_OFFSET ((CALLER_ADDR - GRAPH_ADDR) - 4) struct dyn_arch_ftrace { /* No extra data needed on sh */ diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index fee924a9c46c..94ed99b68002 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -30,6 +30,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_GENERIC_GPIO) += gpio.o obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o +obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_DUMP_CODE) += disassemble.o obj-$(CONFIG_HIBERNATION) += swsusp.o diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index 4f62eced0aec..6647dfcb781d 100644 --- 
a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c @@ -16,11 +16,13 @@ #include #include #include +#include #include #include #include #include +#ifdef CONFIG_DYNAMIC_FTRACE static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE]; static unsigned char ftrace_nop[4]; @@ -133,6 +135,126 @@ int __init ftrace_dyn_arch_init(void *data) return 0; } +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#ifdef CONFIG_DYNAMIC_FTRACE +extern void ftrace_graph_call(void); + +static int ftrace_mod(unsigned long ip, unsigned long old_addr, + unsigned long new_addr) +{ + unsigned char code[MCOUNT_INSN_SIZE]; + + if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) + return -EFAULT; + + if (old_addr != __raw_readl((unsigned long *)code)) + return -EINVAL; + + __raw_writel(new_addr, ip); + return 0; +} + +int ftrace_enable_ftrace_graph_caller(void) +{ + unsigned long ip, old_addr, new_addr; + + ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET; + old_addr = (unsigned long)(&skip_trace); + new_addr = (unsigned long)(&ftrace_graph_caller); + + return ftrace_mod(ip, old_addr, new_addr); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + unsigned long ip, old_addr, new_addr; + + ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET; + old_addr = (unsigned long)(&ftrace_graph_caller); + new_addr = (unsigned long)(&skip_trace); + + return ftrace_mod(ip, old_addr, new_addr); +} +#endif /* CONFIG_DYNAMIC_FTRACE */ + +/* + * Hook the return address and push it in the stack of return addrs + * in the current thread info. + * + * This is the main routine for the function graph tracer. The function + * graph tracer essentially works like this: + * + * parent is the stack address containing self_addr's return address. + * We pull the real return address out of parent and store it in + * current's ret_stack. Then, we replace the return address on the stack + * with the address of return_to_handler. self_addr is the function that + * called mcount. + * + * When self_addr returns, it will jump to return_to_handler which calls + * ftrace_return_to_handler. ftrace_return_to_handler will pull the real + * return address off of current's ret_stack and jump to it. + */ +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) +{ + unsigned long old; + int faulted, err; + struct ftrace_graph_ent trace; + unsigned long return_hooker = (unsigned long)&return_to_handler; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + /* + * Protect against fault, even if it shouldn't + * happen. This tool is too much intrusive to + * ignore such a protection. 
+ */ + __asm__ __volatile__( + "1: \n\t" + "mov.l @%2, %0 \n\t" + "2: \n\t" + "mov.l %3, @%2 \n\t" + "mov #0, %1 \n\t" + "3: \n\t" + ".section .fixup, \"ax\" \n\t" + "4: \n\t" + "mov.l 5f, %0 \n\t" + "jmp @%0 \n\t" + " mov #1, %1 \n\t" + ".balign 4 \n\t" + "5: .long 3b \n\t" + ".previous \n\t" + ".section __ex_table,\"a\" \n\t" + ".long 1b, 4b \n\t" + ".long 2b, 4b \n\t" + ".previous \n\t" + : "=&r" (old), "=r" (faulted) + : "r" (parent), "r" (return_hooker) + ); + + if (unlikely(faulted)) { + ftrace_graph_stop(); + WARN_ON(1); + return; + } + + err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0); + if (err == -EBUSY) { + __raw_writel(old, parent); + return; + } + + trace.func = self_addr; + + /* Only trace if the calling function expects to */ + if (!ftrace_graph_entry(&trace)) { + current->curr_ret_stack--; + __raw_writel(old, parent); + } +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #ifdef CONFIG_FTRACE_SYSCALLS diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile index aaea580b65bb..19328d90a2d1 100644 --- a/arch/sh/lib/Makefile +++ b/arch/sh/lib/Makefile @@ -25,6 +25,7 @@ memcpy-$(CONFIG_CPU_SH4) := memcpy-sh4.o lib-$(CONFIG_MMU) += copy_page.o clear_page.o lib-$(CONFIG_FUNCTION_TRACER) += mcount.o +lib-$(CONFIG_FUNCTION_GRAPH_TRACER) += mcount.o lib-y += $(memcpy-y) $(udivsi3-y) EXTRA_CFLAGS += -Werror diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S index 8596483f7b41..bd3ec648becc 100644 --- a/arch/sh/lib/mcount.S +++ b/arch/sh/lib/mcount.S @@ -111,14 +111,62 @@ mcount_call: jsr @r6 nop +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + mov.l .Lftrace_graph_return, r6 + mov.l .Lftrace_stub, r7 + cmp/eq r6, r7 + bt 1f + + mov.l .Lftrace_graph_caller, r0 + jmp @r0 + nop + +1: + mov.l .Lftrace_graph_entry, r6 + mov.l .Lftrace_graph_entry_stub, r7 + cmp/eq r6, r7 + bt skip_trace + + mov.l .Lftrace_graph_caller, r0 + jmp @r0 + nop + + .align 2 +.Lftrace_graph_return: + .long ftrace_graph_return +.Lftrace_graph_entry: + .long ftrace_graph_entry +.Lftrace_graph_entry_stub: + .long ftrace_graph_entry_stub +.Lftrace_graph_caller: + .long ftrace_graph_caller +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + + .globl skip_trace skip_trace: MCOUNT_LEAVE() .align 2 .Lftrace_trace_function: - .long ftrace_trace_function + .long ftrace_trace_function #ifdef CONFIG_DYNAMIC_FTRACE +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* + * NOTE: Do not move either ftrace_graph_call or ftrace_caller + * as this will affect the calculation of GRAPH_INSN_OFFSET. + */ + .globl ftrace_graph_call +ftrace_graph_call: + mov.l .Lskip_trace, r0 + jmp @r0 + nop + + .align 2 +.Lskip_trace: + .long skip_trace +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ + .globl ftrace_caller ftrace_caller: mov.l .Lfunction_trace_stop, r0 @@ -136,7 +184,12 @@ ftrace_call: jsr @r6 nop +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + bra ftrace_graph_call + nop +#else MCOUNT_LEAVE() +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #endif /* CONFIG_DYNAMIC_FTRACE */ /* @@ -188,3 +241,65 @@ stack_panic: .Lpanic_str: .string "Stack error" #endif /* CONFIG_STACK_DEBUG */ + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + .globl ftrace_graph_caller +ftrace_graph_caller: + mov.l 2f, r0 + mov.l @r0, r0 + tst r0, r0 + bt 1f + + mov.l 3f, r1 + jmp @r1 + nop +1: + /* + * MCOUNT_ENTER() pushed 5 registers onto the stack, so + * the stack address containing our return address is + * r15 + 20. 
+ */ + mov #20, r0 + add r15, r0 + mov r0, r4 + + mov.l .Lprepare_ftrace_return, r0 + jsr @r0 + nop + + MCOUNT_LEAVE() + + .align 2 +2: .long function_trace_stop +3: .long skip_trace +.Lprepare_ftrace_return: + .long prepare_ftrace_return + + .globl return_to_handler +return_to_handler: + /* + * Save the return values. + */ + mov.l r0, @-r15 + mov.l r1, @-r15 + + mov #0, r4 + + mov.l .Lftrace_return_to_handler, r0 + jsr @r0 + nop + + /* + * The return value from ftrace_return_handler has the real + * address that we should return to. + */ + lds r0, pr + mov.l @r15+, r1 + rts + mov.l @r15+, r0 + + + .align 2 +.Lftrace_return_to_handler: + .long ftrace_return_to_handler +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -- cgit v1.2.3-59-g8ed1b From 7816fecd03e480ed0b47d674ed772ca0b45e1b5e Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sat, 11 Jul 2009 00:29:04 +0000 Subject: sh: Mark __switch_to() as __notrace_funcgraph Annotate __switch_to() so that the function graph tracer does not try to trace it. Use __notrace_funcgraph, as opposed to notrace, so that other tracers can continue to trace __switch_to(). The reason that we don't want to trace __switch_to() with the function graph tracer is because of how the return address stack in task_struct is implemented. When we enter __switch_to we store the real return address on prev's ret_stack. When we return from __switch_to() we've patched the return address on the kernel stack to be return_to_handler. Calling return_to_handler we do, -> ftrace_return_to_handler() -> ftrace_pop_return_ftrace() Which tries to pop the real return address from current->ret_stack. The problem being that we stored the return address on prev->ret_stack, but current now points to next, and next->ret_stack doesn't contain the correct return address (and is possibly even empty). Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/process_32.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 92d7740faab1..9fee977f176b 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -264,8 +265,8 @@ static void ubc_set_tracing(int asid, unsigned long pc) * switch_to(x,y) should switch tasks from x to y. * */ -struct task_struct *__switch_to(struct task_struct *prev, - struct task_struct *next) +__notrace_funcgraph struct task_struct * +__switch_to(struct task_struct *prev, struct task_struct *next) { #if defined(CONFIG_SH_FPU) unlazy_fpu(prev, task_pt_regs(prev)); -- cgit v1.2.3-59-g8ed1b From 473d1cf4ee623b043790838bcf77e77958840bf2 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 11 Jul 2009 19:56:58 +0900 Subject: sh: Decouple mcount from ftrace. This adds a general CONFIG_MCOUNT in order to permit mcount generation without ftrace support. This is primarily for allowing platforms to enable aggressive stack overflow checking without having to enable ftrace support. Based on the sparc64 implementation. 
Signed-off-by: Paul Mundt --- arch/sh/Kconfig.debug | 5 +++++ arch/sh/Makefile | 4 ++++ arch/sh/boot/compressed/Makefile | 2 +- arch/sh/kernel/sh_ksyms_32.c | 2 +- arch/sh/lib/Makefile | 3 +-- 5 files changed, 12 insertions(+), 4 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index 52a132c24aab..75b5f4e06670 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -134,4 +134,9 @@ config STACK_DEBUG call and will therefore incur a major performance hit. Most users should say N. +config MCOUNT + def_bool y + depends on SUPERH32 + depends on STACK_DEBUG || FUNCTION_TRACER + endmenu diff --git a/arch/sh/Makefile b/arch/sh/Makefile index 75d049b03f7e..52c34bf56962 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -186,6 +186,10 @@ KBUILD_CFLAGS += -pipe $(cflags-y) KBUILD_CPPFLAGS += $(cflags-y) KBUILD_AFLAGS += $(cflags-y) +ifeq ($(CONFIG_MCOUNT),y) + KBUILD_CFLAGS += -pg +endif + libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y) libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y) diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile index 9531bf1b7c2f..3af239cb9b0d 100644 --- a/arch/sh/boot/compressed/Makefile +++ b/arch/sh/boot/compressed/Makefile @@ -23,7 +23,7 @@ IMAGE_OFFSET := $(shell /bin/bash -c 'printf "0x%08x" \ LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) -ifeq ($(CONFIG_FUNCTION_TRACER),y) +ifeq ($(CONFIG_MCOUNT),y) ORIG_CFLAGS := $(KBUILD_CFLAGS) KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) endif diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index fcc5de31f83b..5b81116046c1 100644 --- a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c @@ -106,7 +106,7 @@ EXPORT_SYMBOL(flush_dcache_page); EXPORT_SYMBOL(clear_user_page); #endif -#ifdef CONFIG_FUNCTION_TRACER +#ifdef CONFIG_MCOUNT EXPORT_SYMBOL(mcount); #endif EXPORT_SYMBOL(csum_partial); diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile index 19328d90a2d1..c2b28d8b2dd1 100644 --- a/arch/sh/lib/Makefile +++ b/arch/sh/lib/Makefile @@ -24,8 +24,7 @@ memcpy-y := memcpy.o memcpy-$(CONFIG_CPU_SH4) := memcpy-sh4.o lib-$(CONFIG_MMU) += copy_page.o clear_page.o -lib-$(CONFIG_FUNCTION_TRACER) += mcount.o -lib-$(CONFIG_FUNCTION_GRAPH_TRACER) += mcount.o +lib-$(CONFIG_MCOUNT) += mcount.o lib-y += $(memcpy-y) $(udivsi3-y) EXTRA_CFLAGS += -Werror -- cgit v1.2.3-59-g8ed1b From 9f14b84afda297d301b81a5bcbd65e83d7b02034 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 11 Jul 2009 20:05:34 +0900 Subject: sh: Replace DEBUG_STACKOVERFLOW with STACK_DEBUG. STACK_DEBUG ties in to mcount in order to do function-granular stack overflow checks as opposed to lazily checking from IRQ context. As the default is nohz, the frequency of overflow checking is too irregular to catch much useful information, and so the mcount approach employed by sparc64 is adopted instead. This kills off the old check entirely from the do_IRQ() path and now adopts CONFIG_MCOUNT instead. Signed-off-by: Paul Mundt --- arch/sh/Kconfig.debug | 17 ++++------------- arch/sh/kernel/irq.c | 17 ----------------- 2 files changed, 4 insertions(+), 30 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index 75b5f4e06670..b440fd936714 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -61,12 +61,14 @@ config EARLY_PRINTK select both the EARLY_SCIF_CONSOLE and SH_STANDARD_BIOS, using the kernel command line option to toggle back and forth. 
-config DEBUG_STACKOVERFLOW +config STACK_DEBUG bool "Check for stack overflows" depends on DEBUG_KERNEL && SUPERH32 help This option will cause messages to be printed if free stack space - drops below a certain limit. + drops below a certain limit. Saying Y here will add overhead to + every function call and will therefore incur a major + performance hit. Most users should say N. config DEBUG_STACK_USAGE bool "Stack utilization instrumentation" @@ -123,17 +125,6 @@ config SH64_SR_WATCH bool "Debug: set SR.WATCH to enable hardware watchpoints and trace" depends on SUPERH64 -config STACK_DEBUG - bool "Enable diagnostic checks of the kernel stack" - depends on FUNCTION_TRACER - select DEBUG_STACKOVERFLOW - default n - help - This option allows checks to be performed on the kernel stack - at runtime. Saying Y here will add overhead to every function - call and will therefore incur a major performance hit. Most - users should say N. - config MCOUNT def_bool y depends on SUPERH32 diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 3d09062f4682..278c68c60488 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -114,23 +114,6 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs) #endif irq_enter(); - -#ifdef CONFIG_DEBUG_STACKOVERFLOW - /* Debugging check for stack overflow: is there less than 1KB free? */ - { - long sp; - - __asm__ __volatile__ ("and r15, %0" : - "=r" (sp) : "0" (THREAD_SIZE - 1)); - - if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { - printk("do_IRQ: stack overflow: %ld\n", - sp - sizeof(struct thread_info)); - dump_stack(); - } - } -#endif - irq = irq_demux(intc_evt2irq(irq)); #ifdef CONFIG_IRQSTACKS -- cgit v1.2.3-59-g8ed1b From fe27932052aebf77ac5f3e73962825d2aeb457a0 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 11 Jul 2009 20:32:14 +0900 Subject: sh: Use DECLARE_EXPORT() for mcount symbol export. The function prototype for mcount is not defined if we are not building with ftrace support enabled, so use DECLARE_EXPORT() to stub one in. Signed-off-by: Paul Mundt --- arch/sh/kernel/sh_ksyms_32.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index 5b81116046c1..cec610888e28 100644 --- a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c @@ -107,7 +107,7 @@ EXPORT_SYMBOL(clear_user_page); #endif #ifdef CONFIG_MCOUNT -EXPORT_SYMBOL(mcount); +DECLARE_EXPORT(mcount); #endif EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_generic); -- cgit v1.2.3-59-g8ed1b From 72849873cdf213b5d7b8ae006e6740b2d78f6ad8 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 14 Jul 2009 06:51:54 -0400 Subject: sh: Kill off zero-sized vmlinux_64.lds.S This seems to be some merge damage, kill it off. Signed-off-by: Paul Mundt --- arch/sh/kernel/vmlinux_64.lds.S | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 arch/sh/kernel/vmlinux_64.lds.S (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S deleted file mode 100644 index e69de29bb2d1..000000000000 -- cgit v1.2.3-59-g8ed1b From 0f8ee1874fa80899debc0a0670e2bed0a28d2548 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 17 Jul 2009 14:24:55 +0000 Subject: sh: Add support for multiple hwblk counters Extend the SuperH hwblk code to support more than one counter. Contains ground work for the future Runtime PM implementation. 
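The counter handling below generalises the old single usage count into a
small array of counters per hardware block and per power area, and a change
only propagates to the parent area when the counter crosses its goal value
(1 when the first user appears, 0 when the last one goes away). A
self-contained model of just that propagation rule, with the kernel types,
locking and MSTP register handling stripped out and the names simplified:

  /* Toy model of __hwblk_mod_cnt()/hwblk_area_mod_cnt(): a block only
   * touches its power area when its counter hits the goal, and an area
   * recurses into its parent the same way. */
  #include <stdio.h>

  #define CNT_NR 1

  struct area { int cnt[CNT_NR]; int parent; int has_parent; };
  struct blk  { int cnt[CNT_NR]; int area; };

  static struct area areas[2] = { [1] = { .parent = 0, .has_parent = 1 } };
  static struct blk  blks[1]  = { { .area = 1 } };

  static void area_mod(int a, int c, int v, int goal)
  {
          areas[a].cnt[c] += v;
          if (areas[a].cnt[c] == goal && areas[a].has_parent)
                  area_mod(areas[a].parent, c, v, goal);
  }

  static int blk_mod(int b, int c, int v, int goal)
  {
          blks[b].cnt[c] += v;
          if (blks[b].cnt[c] == goal)
                  area_mod(blks[b].area, c, v, goal);
          return blks[b].cnt[c];
  }

  int main(void)
  {
          blk_mod(0, 0, 1, 1);    /* first user: block, area 1 and area 0 all hit 1 */
          blk_mod(0, 0, 1, 1);    /* second user: only the block counter moves */
          blk_mod(0, 0, -1, 0);   /* one user left: nothing propagates */
          printf("blk=%d area1=%d area0=%d\n",
                 blks[0].cnt[0], areas[1].cnt[0], areas[0].cnt[0]);
          return 0;               /* prints blk=1 area1=1 area0=1 */
  }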
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/hwblk.h | 13 ++++++- arch/sh/kernel/cpu/hwblk.c | 69 +++++++++++++++++++++++----------- arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c | 4 +- 3 files changed, 60 insertions(+), 26 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/hwblk.h b/arch/sh/include/asm/hwblk.h index 51a46f496639..c01d72cb6757 100644 --- a/arch/sh/include/asm/hwblk.h +++ b/arch/sh/include/asm/hwblk.h @@ -4,6 +4,9 @@ #include #include +#define HWBLK_CNT_USAGE 0 +#define HWBLK_CNT_NR 1 + #define HWBLK_AREA_FLAG_PARENT (1 << 0) /* valid parent */ #define HWBLK_AREA(_flags, _parent) \ @@ -13,7 +16,7 @@ } struct hwblk_area { - unsigned long cnt; + int cnt[HWBLK_CNT_NR]; unsigned char parent; unsigned char flags; }; @@ -29,7 +32,7 @@ struct hwblk { void __iomem *mstp; unsigned char bit; unsigned char area; - unsigned long cnt; + int cnt[HWBLK_CNT_NR]; }; struct hwblk_info { @@ -46,6 +49,12 @@ int arch_hwblk_sleep_mode(void); int hwblk_register(struct hwblk_info *info); int hwblk_init(void); +void hwblk_enable(struct hwblk_info *info, int hwblk); +void hwblk_disable(struct hwblk_info *info, int hwblk); + +void hwblk_cnt_inc(struct hwblk_info *info, int hwblk, int cnt); +void hwblk_cnt_dec(struct hwblk_info *info, int hwblk, int cnt); + /* allow clocks to enable and disable hardware blocks */ #define SH_HWBLK_CLK(_name, _id, _parent, _hwblk, _flags) \ { \ diff --git a/arch/sh/kernel/cpu/hwblk.c b/arch/sh/kernel/cpu/hwblk.c index 7c3a73deff24..c0ad7d46e784 100644 --- a/arch/sh/kernel/cpu/hwblk.c +++ b/arch/sh/kernel/cpu/hwblk.c @@ -9,38 +9,64 @@ static DEFINE_SPINLOCK(hwblk_lock); -static void hwblk_area_inc(struct hwblk_info *info, int area) +static void hwblk_area_mod_cnt(struct hwblk_info *info, + int area, int counter, int value, int goal) { struct hwblk_area *hap = info->areas + area; - hap->cnt++; - if (hap->cnt == 1) - if (hap->flags & HWBLK_AREA_FLAG_PARENT) - hwblk_area_inc(info, hap->parent); + hap->cnt[counter] += value; + + if (hap->cnt[counter] != goal) + return; + + if (hap->flags & HWBLK_AREA_FLAG_PARENT) + hwblk_area_mod_cnt(info, hap->parent, counter, value, goal); } -static void hwblk_area_dec(struct hwblk_info *info, int area) + +static int __hwblk_mod_cnt(struct hwblk_info *info, int hwblk, + int counter, int value, int goal) { - struct hwblk_area *hap = info->areas + area; + struct hwblk *hp = info->hwblks + hwblk; + + hp->cnt[counter] += value; + if (hp->cnt[counter] == goal) + hwblk_area_mod_cnt(info, hp->area, counter, value, goal); - if (hap->cnt == 1) - if (hap->flags & HWBLK_AREA_FLAG_PARENT) - hwblk_area_dec(info, hap->parent); - hap->cnt--; + return hp->cnt[counter]; } -static void hwblk_enable(struct hwblk_info *info, int hwblk) +static void hwblk_mod_cnt(struct hwblk_info *info, int hwblk, + int counter, int value, int goal) +{ + unsigned long flags; + + spin_lock_irqsave(&hwblk_lock, flags); + __hwblk_mod_cnt(info, hwblk, counter, value, goal); + spin_unlock_irqrestore(&hwblk_lock, flags); +} + +void hwblk_cnt_inc(struct hwblk_info *info, int hwblk, int counter) +{ + hwblk_mod_cnt(info, hwblk, counter, 1, 1); +} + +void hwblk_cnt_dec(struct hwblk_info *info, int hwblk, int counter) +{ + hwblk_mod_cnt(info, hwblk, counter, -1, 0); +} + +void hwblk_enable(struct hwblk_info *info, int hwblk) { struct hwblk *hp = info->hwblks + hwblk; unsigned long tmp; unsigned long flags; + int ret; spin_lock_irqsave(&hwblk_lock, flags); - hp->cnt++; - if (hp->cnt == 1) { - hwblk_area_inc(info, hp->area); - + ret = 
__hwblk_mod_cnt(info, hwblk, HWBLK_CNT_USAGE, 1, 1); + if (ret == 1) { tmp = __raw_readl(hp->mstp); tmp &= ~(1 << hp->bit); __raw_writel(tmp, hp->mstp); @@ -49,27 +75,26 @@ static void hwblk_enable(struct hwblk_info *info, int hwblk) spin_unlock_irqrestore(&hwblk_lock, flags); } -static void hwblk_disable(struct hwblk_info *info, int hwblk) +void hwblk_disable(struct hwblk_info *info, int hwblk) { struct hwblk *hp = info->hwblks + hwblk; unsigned long tmp; unsigned long flags; + int ret; spin_lock_irqsave(&hwblk_lock, flags); - if (hp->cnt == 1) { - hwblk_area_dec(info, hp->area); - + ret = __hwblk_mod_cnt(info, hwblk, HWBLK_CNT_USAGE, -1, 0); + if (ret == 0) { tmp = __raw_readl(hp->mstp); tmp |= 1 << hp->bit; __raw_writel(tmp, hp->mstp); } - hp->cnt--; spin_unlock_irqrestore(&hwblk_lock, flags); } -static struct hwblk_info *hwblk_info; +struct hwblk_info *hwblk_info; int __init hwblk_register(struct hwblk_info *info) { diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c index 00a1c02d82b1..a288b5d92341 100644 --- a/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7722.c @@ -91,10 +91,10 @@ static struct hwblk_info sh7722_hwblk_info = { int arch_hwblk_sleep_mode(void) { - if (!sh7722_hwblk_area[CORE_AREA].cnt) + if (!sh7722_hwblk_area[CORE_AREA].cnt[HWBLK_CNT_USAGE]) return SUSP_SH_STANDBY | SUSP_SH_SF; - if (!sh7722_hwblk_area[CORE_AREA_BM].cnt) + if (!sh7722_hwblk_area[CORE_AREA_BM].cnt[HWBLK_CNT_USAGE]) return SUSP_SH_SLEEP | SUSP_SH_SF; return SUSP_SH_SLEEP; -- cgit v1.2.3-59-g8ed1b From 2094e504a7489a2d774d544592360e578d9325f8 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 17 Jul 2009 14:43:38 +0000 Subject: sh: hwblk support for sh7723 This patch adds hwblk support for the sh7723 processor. 
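At the register level, what this table buys us is knowing which module stop
bit to flip: hwblk_enable() clears the block's bit in its MSTPCR register
when the first user shows up, and hwblk_disable() sets it again when the
last user goes away. A stand-alone toy version of that bit handling (the
register is a plain variable here; the kernel code uses __raw_readl() and
__raw_writel() on the real MSTPCR address under the hwblk spinlock):

  /* Toy model of the MSTP handling in hwblk_enable()/hwblk_disable():
   * a set bit in MSTPCRn stops the module clock, a cleared bit runs it.
   * Bit 0 of MSTPCR2 is HWBLK_LCDC in the sh7723 table below. */
  #include <stdio.h>

  static unsigned long mstpcr2 = ~0UL;            /* everything stopped */

  static void mstp_run(int bit)  { mstpcr2 &= ~(1UL << bit); }
  static void mstp_stop(int bit) { mstpcr2 |=  (1UL << bit); }

  int main(void)
  {
          int lcdc_bit = 0;

          mstp_run(lcdc_bit);     /* usage count went 0 -> 1 */
          printf("MSTPCR2 = %#lx\n", mstpcr2);
          mstp_stop(lcdc_bit);    /* usage count went 1 -> 0 */
          printf("MSTPCR2 = %#lx\n", mstpcr2);
          return 0;
  }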
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/cpu-sh4/cpu/sh7723.h | 17 +++++ arch/sh/kernel/cpu/sh4a/Makefile | 2 +- arch/sh/kernel/cpu/sh4a/clock-sh7723.c | 110 ++++++++++++++++--------------- arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c | 117 +++++++++++++++++++++++++++++++++ 4 files changed, 193 insertions(+), 53 deletions(-) create mode 100644 arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/cpu-sh4/cpu/sh7723.h b/arch/sh/include/cpu-sh4/cpu/sh7723.h index 14c8ca936781..9b36fae72324 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7723.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7723.h @@ -265,4 +265,21 @@ enum { GPIO_FN_IDEA1, GPIO_FN_IDEA0, }; +enum { + HWBLK_UNKNOWN = 0, + HWBLK_TLB, HWBLK_IC, HWBLK_OC, HWBLK_L2C, HWBLK_ILMEM, HWBLK_FPU, + HWBLK_INTC, HWBLK_DMAC0, HWBLK_SHYWAY, + HWBLK_HUDI, HWBLK_DBG, HWBLK_UBC, HWBLK_SUBC, + HWBLK_TMU0, HWBLK_CMT, HWBLK_RWDT, HWBLK_DMAC1, HWBLK_TMU1, + HWBLK_FLCTL, + HWBLK_SCIF0, HWBLK_SCIF1, HWBLK_SCIF2, + HWBLK_SCIF3, HWBLK_SCIF4, HWBLK_SCIF5, + HWBLK_MSIOF0, HWBLK_MSIOF1, HWBLK_MERAM, HWBLK_IIC, HWBLK_RTC, + HWBLK_ATAPI, HWBLK_ADC, HWBLK_TPU, HWBLK_IRDA, HWBLK_TSIF, HWBLK_ICB, + HWBLK_SDHI0, HWBLK_SDHI1, HWBLK_KEYSC, HWBLK_USB, + HWBLK_2DG, HWBLK_SIU, HWBLK_VEU2H1, HWBLK_VOU, HWBLK_BEU, HWBLK_CEU, + HWBLK_VEU2H0, HWBLK_VPU, HWBLK_LCDC, + HWBLK_NR, +}; + #endif /* __ASM_SH7723_H__ */ diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index 3cafda696378..1d7ae38bc611 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile @@ -26,7 +26,7 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o clock-$(CONFIG_CPU_SUBTYPE_SH7786) := clock-sh7786.o clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o hwblk-sh7722.o -clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o +clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o hwblk-sh7723.o clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7366.o clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c index e67c2678b8ae..bf64c78eee34 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include /* SH7723 registers */ #define FRQCR 0xa4150000 @@ -140,60 +142,64 @@ struct clk div6_clks[] = { SH_CLK_DIV6("video_clk", &pll_clk, VCLKCR, 0), }; -#define MSTP(_str, _parent, _reg, _bit, _force_on, _need_cpg, _need_ram) \ - SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _force_on * CLK_ENABLE_ON_INIT) +#define R_CLK (&r_clk) +#define P_CLK (&div4_clks[DIV4_P]) +#define B_CLK (&div4_clks[DIV4_B]) +#define U_CLK (&div4_clks[DIV4_U]) +#define I_CLK (&div4_clks[DIV4_I]) +#define SH_CLK (&div4_clks[DIV4_SH]) static struct clk mstp_clks[] = { /* See page 60 of Datasheet V1.0: Overview -> Block Diagram */ - MSTP("tlb0", &div4_clks[DIV4_I], MSTPCR0, 31, 1, 1, 0), - MSTP("ic0", &div4_clks[DIV4_I], MSTPCR0, 30, 1, 1, 0), - MSTP("oc0", &div4_clks[DIV4_I], MSTPCR0, 29, 1, 1, 0), - MSTP("l2c0", &div4_clks[DIV4_SH], MSTPCR0, 28, 1, 1, 0), - MSTP("ilmem0", &div4_clks[DIV4_I], MSTPCR0, 27, 1, 1, 0), - MSTP("fpu0", &div4_clks[DIV4_I], MSTPCR0, 24, 1, 1, 0), - MSTP("intc0", &div4_clks[DIV4_I], MSTPCR0, 22, 1, 1, 0), - MSTP("dmac0", &div4_clks[DIV4_B], MSTPCR0, 21, 0, 1, 1), - MSTP("sh0", &div4_clks[DIV4_SH], MSTPCR0, 20, 0, 1, 0), - 
MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0, 1, 0), - MSTP("ubc0", &div4_clks[DIV4_I], MSTPCR0, 17, 0, 1, 0), - MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0, 1, 0), - MSTP("cmt0", &r_clk, MSTPCR0, 14, 0, 0, 0), - MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0, 0, 0), - MSTP("dmac1", &div4_clks[DIV4_B], MSTPCR0, 12, 0, 1, 1), - MSTP("tmu1", &div4_clks[DIV4_P], MSTPCR0, 11, 0, 1, 0), - MSTP("flctl0", &div4_clks[DIV4_P], MSTPCR0, 10, 0, 1, 0), - MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 9, 0, 1, 0), - MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 8, 0, 1, 0), - MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 7, 0, 1, 0), - MSTP("scif3", &div4_clks[DIV4_B], MSTPCR0, 6, 0, 1, 0), - MSTP("scif4", &div4_clks[DIV4_B], MSTPCR0, 5, 0, 1, 0), - MSTP("scif5", &div4_clks[DIV4_B], MSTPCR0, 4, 0, 1, 0), - MSTP("msiof0", &div4_clks[DIV4_B], MSTPCR0, 2, 0, 1, 0), - MSTP("msiof1", &div4_clks[DIV4_B], MSTPCR0, 1, 0, 1, 0), - MSTP("meram0", &div4_clks[DIV4_SH], MSTPCR0, 0, 1, 1, 0), - - MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0, 1, 0), - MSTP("rtc0", &r_clk, MSTPCR1, 8, 0, 0, 0), - - MSTP("atapi0", &div4_clks[DIV4_SH], MSTPCR2, 28, 0, 1, 0), - MSTP("adc0", &div4_clks[DIV4_P], MSTPCR2, 27, 0, 1, 0), - MSTP("tpu0", &div4_clks[DIV4_B], MSTPCR2, 25, 0, 1, 0), - MSTP("irda0", &div4_clks[DIV4_P], MSTPCR2, 24, 0, 1, 0), - MSTP("tsif0", &div4_clks[DIV4_B], MSTPCR2, 22, 0, 1, 0), - MSTP("icb0", &div4_clks[DIV4_B], MSTPCR2, 21, 0, 1, 1), - MSTP("sdhi0", &div4_clks[DIV4_B], MSTPCR2, 18, 0, 1, 0), - MSTP("sdhi1", &div4_clks[DIV4_B], MSTPCR2, 17, 0, 1, 0), - MSTP("keysc0", &r_clk, MSTPCR2, 14, 0, 0, 0), - MSTP("usb0", &div4_clks[DIV4_B], MSTPCR2, 11, 0, 1, 0), - MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 10, 0, 1, 1), - MSTP("siu0", &div4_clks[DIV4_B], MSTPCR2, 8, 0, 1, 0), - MSTP("veu1", &div4_clks[DIV4_B], MSTPCR2, 6, 1, 1, 1), - MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0, 1, 1), - MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0, 1, 1), - MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0, 1, 1), - MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, 1, 1, 1), - MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, 1, 1, 1), - MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0, 1, 1), + SH_HWBLK_CLK("tlb0", -1, I_CLK, HWBLK_TLB, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("ic0", -1, I_CLK, HWBLK_IC, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("oc0", -1, I_CLK, HWBLK_OC, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("l2c0", -1, SH_CLK, HWBLK_L2C, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("ilmem0", -1, I_CLK, HWBLK_ILMEM, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("fpu0", -1, I_CLK, HWBLK_FPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("intc0", -1, I_CLK, HWBLK_INTC, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("dmac0", -1, B_CLK, HWBLK_DMAC0, 0), + SH_HWBLK_CLK("sh0", -1, SH_CLK, HWBLK_SHYWAY, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("hudi0", -1, P_CLK, HWBLK_HUDI, 0), + SH_HWBLK_CLK("ubc0", -1, I_CLK, HWBLK_UBC, 0), + SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU0, 0), + SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0), + SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0), + SH_HWBLK_CLK("dmac1", -1, B_CLK, HWBLK_DMAC1, 0), + SH_HWBLK_CLK("tmu1", -1, P_CLK, HWBLK_TMU1, 0), + SH_HWBLK_CLK("flctl0", -1, P_CLK, HWBLK_FLCTL, 0), + SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0), + SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0), + SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0), + SH_HWBLK_CLK("scif3", -1, B_CLK, HWBLK_SCIF3, 0), + SH_HWBLK_CLK("scif4", -1, B_CLK, HWBLK_SCIF4, 0), + SH_HWBLK_CLK("scif5", -1, B_CLK, HWBLK_SCIF5, 0), + SH_HWBLK_CLK("msiof0", -1, B_CLK, HWBLK_MSIOF0, 0), + SH_HWBLK_CLK("msiof1", -1, 
B_CLK, HWBLK_MSIOF1, 0), + SH_HWBLK_CLK("meram0", -1, SH_CLK, HWBLK_MERAM, 0), + + SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC, 0), + SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0), + + SH_HWBLK_CLK("atapi0", -1, SH_CLK, HWBLK_ATAPI, 0), + SH_HWBLK_CLK("adc0", -1, P_CLK, HWBLK_ADC, 0), + SH_HWBLK_CLK("tpu0", -1, B_CLK, HWBLK_TPU, 0), + SH_HWBLK_CLK("irda0", -1, P_CLK, HWBLK_IRDA, 0), + SH_HWBLK_CLK("tsif0", -1, B_CLK, HWBLK_TSIF, 0), + SH_HWBLK_CLK("icb0", -1, B_CLK, HWBLK_ICB, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("sdhi0", -1, B_CLK, HWBLK_SDHI0, 0), + SH_HWBLK_CLK("sdhi1", -1, B_CLK, HWBLK_SDHI1, 0), + SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0), + SH_HWBLK_CLK("usb0", -1, B_CLK, HWBLK_USB, 0), + SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0), + SH_HWBLK_CLK("siu0", -1, B_CLK, HWBLK_SIU, 0), + SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU2H1, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0), + SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU, 0), + SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU, 0), + SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU2H0, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("lcdc0", -1, B_CLK, HWBLK_LCDC, 0), }; int __init arch_clk_init(void) @@ -216,7 +222,7 @@ int __init arch_clk_init(void) ret = sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); if (!ret) - ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks)); + ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks)); return ret; } diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c new file mode 100644 index 000000000000..a7f4684d2032 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c @@ -0,0 +1,117 @@ +/* + * arch/sh/kernel/cpu/sh4a/hwblk-sh7723.c + * + * SH7723 hardware block support + * + * Copyright (C) 2009 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include +#include +#include +#include +#include + +/* SH7723 registers */ +#define MSTPCR0 0xa4150030 +#define MSTPCR1 0xa4150034 +#define MSTPCR2 0xa4150038 + +/* SH7723 Power Domains */ +enum { CORE_AREA, SUB_AREA, CORE_AREA_BM }; +static struct hwblk_area sh7723_hwblk_area[] = { + [CORE_AREA] = HWBLK_AREA(0, 0), + [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA), + [SUB_AREA] = HWBLK_AREA(0, 0), +}; + +/* Table mapping HWBLK to Module Stop Bit and Power Domain */ +static struct hwblk sh7723_hwblk[HWBLK_NR] = { + [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA), + [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA), + [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA), + [HWBLK_L2C] = HWBLK(MSTPCR0, 28, CORE_AREA), + [HWBLK_ILMEM] = HWBLK(MSTPCR0, 27, CORE_AREA), + [HWBLK_FPU] = HWBLK(MSTPCR0, 24, CORE_AREA), + [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA), + [HWBLK_DMAC0] = HWBLK(MSTPCR0, 21, CORE_AREA_BM), + [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA), + [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA), + [HWBLK_DBG] = HWBLK(MSTPCR0, 18, CORE_AREA), + [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA), + [HWBLK_SUBC] = HWBLK(MSTPCR0, 16, CORE_AREA), + [HWBLK_TMU0] = HWBLK(MSTPCR0, 15, CORE_AREA), + [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA), + [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA), + [HWBLK_DMAC1] = HWBLK(MSTPCR0, 12, CORE_AREA_BM), + [HWBLK_TMU1] = HWBLK(MSTPCR0, 11, CORE_AREA), + [HWBLK_FLCTL] = HWBLK(MSTPCR0, 10, CORE_AREA), + [HWBLK_SCIF0] = HWBLK(MSTPCR0, 9, CORE_AREA), + [HWBLK_SCIF1] = HWBLK(MSTPCR0, 8, CORE_AREA), + [HWBLK_SCIF2] = HWBLK(MSTPCR0, 7, CORE_AREA), + [HWBLK_SCIF3] = HWBLK(MSTPCR0, 6, CORE_AREA), + [HWBLK_SCIF4] = HWBLK(MSTPCR0, 5, CORE_AREA), + [HWBLK_SCIF5] = HWBLK(MSTPCR0, 4, CORE_AREA), + [HWBLK_MSIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA), + [HWBLK_MSIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA), + [HWBLK_MERAM] = HWBLK(MSTPCR0, 0, CORE_AREA), + + [HWBLK_IIC] = HWBLK(MSTPCR1, 9, CORE_AREA), + [HWBLK_RTC] = HWBLK(MSTPCR1, 8, SUB_AREA), + + [HWBLK_ATAPI] = HWBLK(MSTPCR2, 28, CORE_AREA_BM), + [HWBLK_ADC] = HWBLK(MSTPCR2, 27, CORE_AREA), + [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA), + [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA), + [HWBLK_TSIF] = HWBLK(MSTPCR2, 22, CORE_AREA), + [HWBLK_ICB] = HWBLK(MSTPCR2, 21, CORE_AREA_BM), + [HWBLK_SDHI0] = HWBLK(MSTPCR2, 18, CORE_AREA), + [HWBLK_SDHI1] = HWBLK(MSTPCR2, 17, CORE_AREA), + [HWBLK_KEYSC] = HWBLK(MSTPCR2, 14, SUB_AREA), + [HWBLK_USB] = HWBLK(MSTPCR2, 11, CORE_AREA), + [HWBLK_2DG] = HWBLK(MSTPCR2, 10, CORE_AREA_BM), + [HWBLK_SIU] = HWBLK(MSTPCR2, 8, CORE_AREA), + [HWBLK_VEU2H1] = HWBLK(MSTPCR2, 6, CORE_AREA_BM), + [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM), + [HWBLK_BEU] = HWBLK(MSTPCR2, 4, CORE_AREA_BM), + [HWBLK_CEU] = HWBLK(MSTPCR2, 3, CORE_AREA_BM), + [HWBLK_VEU2H0] = HWBLK(MSTPCR2, 2, CORE_AREA_BM), + [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM), + [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM), +}; + +static struct hwblk_info sh7723_hwblk_info = { + .areas = sh7723_hwblk_area, + .nr_areas = ARRAY_SIZE(sh7723_hwblk_area), + .hwblks = sh7723_hwblk, + .nr_hwblks = ARRAY_SIZE(sh7723_hwblk), +}; + +int arch_hwblk_sleep_mode(void) +{ + if (!sh7723_hwblk_area[CORE_AREA].cnt[HWBLK_CNT_USAGE]) + return SUSP_SH_STANDBY | SUSP_SH_SF; + + if (!sh7723_hwblk_area[CORE_AREA_BM].cnt[HWBLK_CNT_USAGE]) + return 
SUSP_SH_SLEEP | SUSP_SH_SF; + + return SUSP_SH_SLEEP; +} + +int __init arch_hwblk_init(void) +{ + return hwblk_register(&sh7723_hwblk_info); +} -- cgit v1.2.3-59-g8ed1b From 719a72b7c75bb239ca6184190ab994b71a31c6dc Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 17 Jul 2009 14:59:55 +0000 Subject: usb: r8a66597-hcd platform data on_chip support Convert the r8a66597-hcd driver to use the on_chip flag from platform data to enable on chip behaviour instead of relying on CONFIG_SUPERH_ON_CHIP_R8A66597 ugliness. This makes the code cleaner and also allows us to support both external and internal r8a66597 with the same kernel. It also makes the Kconfig part more future proof since we with this patch can add support for new processors with on-chip r8a66597 without modifying the Kconfig. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/mach-se/7724/setup.c | 1 + arch/sh/kernel/cpu/sh4a/setup-sh7366.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7723.c | 2 +- drivers/usb/host/Kconfig | 7 -- drivers/usb/host/r8a66597-hcd.c | 187 +++++++++++++++++++-------------- drivers/usb/host/r8a66597.h | 76 ++++++-------- include/linux/usb/r8a66597.h | 3 + 7 files changed, 147 insertions(+), 131 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index 8fed45a2fb85..4fb7e48e2843 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -304,6 +304,7 @@ static struct platform_device sh_eth_device = { }; static struct r8a66597_platdata sh7724_usb0_host_data = { + .on_chip = 1, }; static struct resource sh7724_usb0_host_resources[] = { diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c index c18f7d09281b..f6d208813564 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c @@ -40,7 +40,7 @@ static struct platform_device iic_device = { }; static struct r8a66597_platdata r8a66597_data = { - /* This set zero to all members */ + .on_chip = 1, }; static struct resource usb_host_resources[] = { diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index e1bb80b2a27b..28516499a2c4 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c @@ -398,7 +398,7 @@ static struct platform_device rtc_device = { }; static struct r8a66597_platdata r8a66597_data = { - /* This set zero to all members */ + .on_chip = 1, }; static struct resource sh7723_usb_host_resources[] = { diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 1a920c70b5a1..f21ca7d27a43 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -336,13 +336,6 @@ config USB_R8A66597_HCD To compile this driver as a module, choose M here: the module will be called r8a66597-hcd. -config SUPERH_ON_CHIP_R8A66597 - boolean "Enable SuperH on-chip R8A66597 USB" - depends on USB_R8A66597_HCD && (CPU_SUBTYPE_SH7366 || CPU_SUBTYPE_SH7723 || CPU_SUBTYPE_SH7724) - help - This driver enables support for the on-chip R8A66597 in the - SH7366, SH7723 and SH7724 processors. 
- config USB_WHCI_HCD tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)" depends on EXPERIMENTAL diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 09895a97c10b..82dce3e0d4d7 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -91,43 +91,43 @@ static int r8a66597_clock_enable(struct r8a66597 *r8a66597) u16 tmp; int i = 0; -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) -#if defined(CONFIG_HAVE_CLK) - clk_enable(r8a66597->clk); + if (r8a66597->pdata->on_chip) { +#ifdef CONFIG_HAVE_CLK + clk_enable(r8a66597->clk); #endif - do { - r8a66597_write(r8a66597, SCKE, SYSCFG0); - tmp = r8a66597_read(r8a66597, SYSCFG0); - if (i++ > 1000) { - printk(KERN_ERR "r8a66597: register access fail.\n"); - return -ENXIO; - } - } while ((tmp & SCKE) != SCKE); - r8a66597_write(r8a66597, 0x04, 0x02); -#else - do { - r8a66597_write(r8a66597, USBE, SYSCFG0); - tmp = r8a66597_read(r8a66597, SYSCFG0); - if (i++ > 1000) { - printk(KERN_ERR "r8a66597: register access fail.\n"); - return -ENXIO; - } - } while ((tmp & USBE) != USBE); - r8a66597_bclr(r8a66597, USBE, SYSCFG0); - r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata), XTAL, - SYSCFG0); + do { + r8a66597_write(r8a66597, SCKE, SYSCFG0); + tmp = r8a66597_read(r8a66597, SYSCFG0); + if (i++ > 1000) { + printk(KERN_ERR "r8a66597: reg access fail.\n"); + return -ENXIO; + } + } while ((tmp & SCKE) != SCKE); + r8a66597_write(r8a66597, 0x04, 0x02); + } else { + do { + r8a66597_write(r8a66597, USBE, SYSCFG0); + tmp = r8a66597_read(r8a66597, SYSCFG0); + if (i++ > 1000) { + printk(KERN_ERR "r8a66597: reg access fail.\n"); + return -ENXIO; + } + } while ((tmp & USBE) != USBE); + r8a66597_bclr(r8a66597, USBE, SYSCFG0); + r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata), + XTAL, SYSCFG0); - i = 0; - r8a66597_bset(r8a66597, XCKE, SYSCFG0); - do { - msleep(1); - tmp = r8a66597_read(r8a66597, SYSCFG0); - if (i++ > 500) { - printk(KERN_ERR "r8a66597: register access fail.\n"); - return -ENXIO; - } - } while ((tmp & SCKE) != SCKE); -#endif /* #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) */ + i = 0; + r8a66597_bset(r8a66597, XCKE, SYSCFG0); + do { + msleep(1); + tmp = r8a66597_read(r8a66597, SYSCFG0); + if (i++ > 500) { + printk(KERN_ERR "r8a66597: reg access fail.\n"); + return -ENXIO; + } + } while ((tmp & SCKE) != SCKE); + } return 0; } @@ -136,15 +136,16 @@ static void r8a66597_clock_disable(struct r8a66597 *r8a66597) { r8a66597_bclr(r8a66597, SCKE, SYSCFG0); udelay(1); -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) -#if defined(CONFIG_HAVE_CLK) - clk_disable(r8a66597->clk); -#endif -#else - r8a66597_bclr(r8a66597, PLLC, SYSCFG0); - r8a66597_bclr(r8a66597, XCKE, SYSCFG0); - r8a66597_bclr(r8a66597, USBE, SYSCFG0); + + if (r8a66597->pdata->on_chip) { +#ifdef CONFIG_HAVE_CLK + clk_disable(r8a66597->clk); #endif + } else { + r8a66597_bclr(r8a66597, PLLC, SYSCFG0); + r8a66597_bclr(r8a66597, XCKE, SYSCFG0); + r8a66597_bclr(r8a66597, USBE, SYSCFG0); + } } static void r8a66597_enable_port(struct r8a66597 *r8a66597, int port) @@ -205,7 +206,7 @@ static int enable_controller(struct r8a66597 *r8a66597) r8a66597_bset(r8a66597, SIGNE | SACKE, INTENB1); - for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) + for (port = 0; port < r8a66597->max_root_hub; port++) r8a66597_enable_port(r8a66597, port); return 0; @@ -218,7 +219,7 @@ static void disable_controller(struct r8a66597 *r8a66597) r8a66597_write(r8a66597, 0, INTENB0); r8a66597_write(r8a66597, 0, INTSTS0); - for (port = 0; port 
< R8A66597_MAX_ROOT_HUB; port++) + for (port = 0; port < r8a66597->max_root_hub; port++) r8a66597_disable_port(r8a66597, port); r8a66597_clock_disable(r8a66597); @@ -249,11 +250,12 @@ static int is_hub_limit(char *devpath) return ((strlen(devpath) >= 4) ? 1 : 0); } -static void get_port_number(char *devpath, u16 *root_port, u16 *hub_port) +static void get_port_number(struct r8a66597 *r8a66597, + char *devpath, u16 *root_port, u16 *hub_port) { if (root_port) { *root_port = (devpath[0] & 0x0F) - 1; - if (*root_port >= R8A66597_MAX_ROOT_HUB) + if (*root_port >= r8a66597->max_root_hub) printk(KERN_ERR "r8a66597: Illegal root port number.\n"); } if (hub_port) @@ -355,7 +357,8 @@ static int make_r8a66597_device(struct r8a66597 *r8a66597, INIT_LIST_HEAD(&dev->device_list); list_add_tail(&dev->device_list, &r8a66597->child_device); - get_port_number(urb->dev->devpath, &dev->root_port, &dev->hub_port); + get_port_number(r8a66597, urb->dev->devpath, + &dev->root_port, &dev->hub_port); if (!is_child_device(urb->dev->devpath)) r8a66597->root_hub[dev->root_port].dev = dev; @@ -420,7 +423,7 @@ static void free_usb_address(struct r8a66597 *r8a66597, list_del(&dev->device_list); kfree(dev); - for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) { + for (port = 0; port < r8a66597->max_root_hub; port++) { if (r8a66597->root_hub[port].dev == dev) { r8a66597->root_hub[port].dev = NULL; break; @@ -495,10 +498,20 @@ static void r8a66597_pipe_toggle(struct r8a66597 *r8a66597, r8a66597_bset(r8a66597, SQCLR, pipe->pipectr); } +static inline unsigned short mbw_value(struct r8a66597 *r8a66597) +{ + if (r8a66597->pdata->on_chip) + return MBW_32; + else + return MBW_16; +} + /* this function must be called with interrupt disabled */ static inline void cfifo_change(struct r8a66597 *r8a66597, u16 pipenum) { - r8a66597_mdfy(r8a66597, MBW | pipenum, MBW | CURPIPE, CFIFOSEL); + unsigned short mbw = mbw_value(r8a66597); + + r8a66597_mdfy(r8a66597, mbw | pipenum, mbw | CURPIPE, CFIFOSEL); r8a66597_reg_wait(r8a66597, CFIFOSEL, CURPIPE, pipenum); } @@ -506,11 +519,13 @@ static inline void cfifo_change(struct r8a66597 *r8a66597, u16 pipenum) static inline void fifo_change_from_pipe(struct r8a66597 *r8a66597, struct r8a66597_pipe *pipe) { + unsigned short mbw = mbw_value(r8a66597); + cfifo_change(r8a66597, 0); - r8a66597_mdfy(r8a66597, MBW | 0, MBW | CURPIPE, D0FIFOSEL); - r8a66597_mdfy(r8a66597, MBW | 0, MBW | CURPIPE, D1FIFOSEL); + r8a66597_mdfy(r8a66597, mbw | 0, mbw | CURPIPE, D0FIFOSEL); + r8a66597_mdfy(r8a66597, mbw | 0, mbw | CURPIPE, D1FIFOSEL); - r8a66597_mdfy(r8a66597, MBW | pipe->info.pipenum, MBW | CURPIPE, + r8a66597_mdfy(r8a66597, mbw | pipe->info.pipenum, mbw | CURPIPE, pipe->fifosel); r8a66597_reg_wait(r8a66597, pipe->fifosel, CURPIPE, pipe->info.pipenum); } @@ -742,9 +757,13 @@ static void enable_r8a66597_pipe_dma(struct r8a66597 *r8a66597, struct r8a66597_pipe *pipe, struct urb *urb) { -#if !defined(CONFIG_SUPERH_ON_CHIP_R8A66597) int i; struct r8a66597_pipe_info *info = &pipe->info; + unsigned short mbw = mbw_value(r8a66597); + + /* pipe dma is only for external controlles */ + if (r8a66597->pdata->on_chip) + return; if ((pipe->info.pipenum != 0) && (info->type != R8A66597_INT)) { for (i = 0; i < R8A66597_MAX_DMA_CHANNEL; i++) { @@ -763,8 +782,8 @@ static void enable_r8a66597_pipe_dma(struct r8a66597 *r8a66597, set_pipe_reg_addr(pipe, i); cfifo_change(r8a66597, 0); - r8a66597_mdfy(r8a66597, MBW | pipe->info.pipenum, - MBW | CURPIPE, pipe->fifosel); + r8a66597_mdfy(r8a66597, mbw | pipe->info.pipenum, + mbw | 
CURPIPE, pipe->fifosel); r8a66597_reg_wait(r8a66597, pipe->fifosel, CURPIPE, pipe->info.pipenum); @@ -772,7 +791,6 @@ static void enable_r8a66597_pipe_dma(struct r8a66597 *r8a66597, break; } } -#endif /* #if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) */ } /* this function must be called with interrupt disabled */ @@ -1769,7 +1787,7 @@ static void r8a66597_timer(unsigned long _r8a66597) spin_lock_irqsave(&r8a66597->lock, flags); - for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) + for (port = 0; port < r8a66597->max_root_hub; port++) r8a66597_root_hub_control(r8a66597, port); spin_unlock_irqrestore(&r8a66597->lock, flags); @@ -1807,7 +1825,7 @@ static void set_address_zero(struct r8a66597 *r8a66597, struct urb *urb) u16 root_port, hub_port; if (usb_address == 0) { - get_port_number(urb->dev->devpath, + get_port_number(r8a66597, urb->dev->devpath, &root_port, &hub_port); set_devadd_reg(r8a66597, 0, get_r8a66597_usb_speed(urb->dev->speed), @@ -2082,7 +2100,7 @@ static int r8a66597_hub_status_data(struct usb_hcd *hcd, char *buf) *buf = 0; /* initialize (no change) */ - for (i = 0; i < R8A66597_MAX_ROOT_HUB; i++) { + for (i = 0; i < r8a66597->max_root_hub; i++) { if (r8a66597->root_hub[i].port & 0xffff0000) *buf |= 1 << (i + 1); } @@ -2097,11 +2115,11 @@ static void r8a66597_hub_descriptor(struct r8a66597 *r8a66597, { desc->bDescriptorType = 0x29; desc->bHubContrCurrent = 0; - desc->bNbrPorts = R8A66597_MAX_ROOT_HUB; + desc->bNbrPorts = r8a66597->max_root_hub; desc->bDescLength = 9; desc->bPwrOn2PwrGood = 0; desc->wHubCharacteristics = cpu_to_le16(0x0011); - desc->bitmap[0] = ((1 << R8A66597_MAX_ROOT_HUB) - 1) << 1; + desc->bitmap[0] = ((1 << r8a66597->max_root_hub) - 1) << 1; desc->bitmap[1] = ~0; } @@ -2129,7 +2147,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, } break; case ClearPortFeature: - if (wIndex > R8A66597_MAX_ROOT_HUB) + if (wIndex > r8a66597->max_root_hub) goto error; if (wLength != 0) goto error; @@ -2162,12 +2180,12 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, *buf = 0x00; break; case GetPortStatus: - if (wIndex > R8A66597_MAX_ROOT_HUB) + if (wIndex > r8a66597->max_root_hub) goto error; *(__le32 *)buf = cpu_to_le32(rh->port); break; case SetPortFeature: - if (wIndex > R8A66597_MAX_ROOT_HUB) + if (wIndex > r8a66597->max_root_hub) goto error; if (wLength != 0) goto error; @@ -2216,7 +2234,7 @@ static int r8a66597_bus_suspend(struct usb_hcd *hcd) dbg("%s", __func__); - for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) { + for (port = 0; port < r8a66597->max_root_hub; port++) { struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; unsigned long dvstctr_reg = get_dvstctr_reg(port); @@ -2247,7 +2265,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd) dbg("%s", __func__); - for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) { + for (port = 0; port < r8a66597->max_root_hub; port++) { struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; unsigned long dvstctr_reg = get_dvstctr_reg(port); @@ -2314,7 +2332,7 @@ static int r8a66597_suspend(struct device *dev) disable_controller(r8a66597); - for (port = 0; port < R8A66597_MAX_ROOT_HUB; port++) { + for (port = 0; port < r8a66597->max_root_hub; port++) { struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; rh->port = 0x00000000; @@ -2354,8 +2372,9 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev) del_timer_sync(&r8a66597->rh_timer); usb_remove_hcd(hcd); iounmap((void *)r8a66597->reg); -#if 
defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) - clk_put(r8a66597->clk); +#ifdef CONFIG_HAVE_CLK + if (r8a66597->pdata->on_chip) + clk_put(r8a66597->clk); #endif usb_put_hcd(hcd); return 0; @@ -2363,7 +2382,7 @@ static int __init_or_module r8a66597_remove(struct platform_device *pdev) static int __devinit r8a66597_probe(struct platform_device *pdev) { -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) +#ifdef CONFIG_HAVE_CLK char clk_name[8]; #endif struct resource *res = NULL, *ires; @@ -2425,15 +2444,20 @@ static int __devinit r8a66597_probe(struct platform_device *pdev) r8a66597->pdata = pdev->dev.platform_data; r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW; -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) - snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id); - r8a66597->clk = clk_get(&pdev->dev, clk_name); - if (IS_ERR(r8a66597->clk)) { - dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); - ret = PTR_ERR(r8a66597->clk); - goto clean_up2; - } + if (r8a66597->pdata->on_chip) { +#ifdef CONFIG_HAVE_CLK + snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id); + r8a66597->clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(r8a66597->clk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", + clk_name); + ret = PTR_ERR(r8a66597->clk); + goto clean_up2; + } #endif + r8a66597->max_root_hub = 1; + } else + r8a66597->max_root_hub = 2; spin_lock_init(&r8a66597->lock); init_timer(&r8a66597->rh_timer); @@ -2463,8 +2487,9 @@ static int __devinit r8a66597_probe(struct platform_device *pdev) return 0; clean_up3: -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) - clk_put(r8a66597->clk); +#ifdef CONFIG_HAVE_CLK + if (r8a66597->pdata->on_chip) + clk_put(r8a66597->clk); clean_up2: #endif usb_put_hcd(hcd); diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h index d72680b433f9..eecbd917bc81 100644 --- a/drivers/usb/host/r8a66597.h +++ b/drivers/usb/host/r8a66597.h @@ -26,7 +26,7 @@ #ifndef __R8A66597_H__ #define __R8A66597_H__ -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) +#ifdef CONFIG_HAVE_CLK #include #endif @@ -193,13 +193,9 @@ #define REW 0x4000 /* b14: Buffer rewind */ #define DCLRM 0x2000 /* b13: DMA buffer clear mode */ #define DREQE 0x1000 /* b12: DREQ output enable */ -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) -#define MBW 0x0800 -#else -#define MBW 0x0400 /* b10: Maximum bit width for FIFO access */ -#endif #define MBW_8 0x0000 /* 8bit */ #define MBW_16 0x0400 /* 16bit */ +#define MBW_32 0x0800 /* 32bit */ #define BIGEND 0x0100 /* b8: Big endian mode */ #define BYTE_LITTLE 0x0000 /* little dendian */ #define BYTE_BIG 0x0100 /* big endifan */ @@ -405,11 +401,7 @@ #define R8A66597_MAX_NUM_PIPE 10 #define R8A66597_BUF_BSIZE 8 #define R8A66597_MAX_DEVICE 10 -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) -#define R8A66597_MAX_ROOT_HUB 1 -#else #define R8A66597_MAX_ROOT_HUB 2 -#endif #define R8A66597_MAX_SAMPLING 5 #define R8A66597_RH_POLL_TIME 10 #define R8A66597_MAX_DMA_CHANNEL 2 @@ -487,7 +479,7 @@ struct r8a66597_root_hub { struct r8a66597 { spinlock_t lock; unsigned long reg; -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) && defined(CONFIG_HAVE_CLK) +#ifdef CONFIG_HAVE_CLK struct clk *clk; #endif struct r8a66597_platdata *pdata; @@ -504,6 +496,7 @@ struct r8a66597 { unsigned short interval_map; unsigned char pipe_cnt[R8A66597_MAX_NUM_PIPE]; unsigned char dma_map; + unsigned int max_root_hub; struct list_head child_device; unsigned long 
child_connect_map[4]; @@ -550,21 +543,22 @@ static inline void r8a66597_read_fifo(struct r8a66597 *r8a66597, unsigned long offset, u16 *buf, int len) { -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) unsigned long fifoaddr = r8a66597->reg + offset; unsigned long count; - count = len / 4; - insl(fifoaddr, buf, count); + if (r8a66597->pdata->on_chip) { + count = len / 4; + insl(fifoaddr, buf, count); - if (len & 0x00000003) { - unsigned long tmp = inl(fifoaddr); - memcpy((unsigned char *)buf + count * 4, &tmp, len & 0x03); + if (len & 0x00000003) { + unsigned long tmp = inl(fifoaddr); + memcpy((unsigned char *)buf + count * 4, &tmp, + len & 0x03); + } + } else { + len = (len + 1) / 2; + insw(fifoaddr, buf, len); } -#else - len = (len + 1) / 2; - insw(r8a66597->reg + offset, buf, len); -#endif } static inline void r8a66597_write(struct r8a66597 *r8a66597, u16 val, @@ -578,33 +572,33 @@ static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597, int len) { unsigned long fifoaddr = r8a66597->reg + offset; -#if defined(CONFIG_SUPERH_ON_CHIP_R8A66597) unsigned long count; unsigned char *pb; int i; - count = len / 4; - outsl(fifoaddr, buf, count); + if (r8a66597->pdata->on_chip) { + count = len / 4; + outsl(fifoaddr, buf, count); + + if (len & 0x00000003) { + pb = (unsigned char *)buf + count * 4; + for (i = 0; i < (len & 0x00000003); i++) { + if (r8a66597_read(r8a66597, CFIFOSEL) & BIGEND) + outb(pb[i], fifoaddr + i); + else + outb(pb[i], fifoaddr + 3 - i); + } + } + } else { + int odd = len & 0x0001; - if (len & 0x00000003) { - pb = (unsigned char *)buf + count * 4; - for (i = 0; i < (len & 0x00000003); i++) { - if (r8a66597_read(r8a66597, CFIFOSEL) & BIGEND) - outb(pb[i], fifoaddr + i); - else - outb(pb[i], fifoaddr + 3 - i); + len = len / 2; + outsw(fifoaddr, buf, len); + if (unlikely(odd)) { + buf = &buf[len]; + outb((unsigned char)*buf, fifoaddr); } } -#else - int odd = len & 0x0001; - - len = len / 2; - outsw(fifoaddr, buf, len); - if (unlikely(odd)) { - buf = &buf[len]; - outb((unsigned char)*buf, fifoaddr); - } -#endif } static inline void r8a66597_mdfy(struct r8a66597 *r8a66597, diff --git a/include/linux/usb/r8a66597.h b/include/linux/usb/r8a66597.h index e9f0384fa20c..460ee3f6a2c6 100644 --- a/include/linux/usb/r8a66597.h +++ b/include/linux/usb/r8a66597.h @@ -31,6 +31,9 @@ struct r8a66597_platdata { /* This ops can controll port power instead of DVSTCTR register. */ void (*port_power)(int port, int power); + /* set one = on chip controller, set zero = external controller */ + unsigned on_chip:1; + /* (external controller only) set R8A66597_PLATDATA_XTAL_nnMHZ */ unsigned xtal:2; -- cgit v1.2.3-59-g8ed1b From 6bde607e699b9f3c6fa5e4a97d78c7778e643b3d Mon Sep 17 00:00:00 2001 From: Tim Abbott Date: Mon, 20 Jul 2009 23:30:24 +0900 Subject: sh: Clean up linker script using new BSS_SECTION macro. Updated to use the fixed BSS linker script macros from this thread: http://www.spinics.net/lists/kernel/msg913238.html Signed-off-by: Tim Abbott Cc: Sam Ravnborg Cc: linux-sh@vger.kernel.org Signed-off-by: Paul Mundt --- arch/sh/kernel/vmlinux.lds.S | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index 674ed8feb8ae..80dc9f8d9412 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -93,8 +93,7 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_end = .; - BSS(PAGE_SIZE) - . = ALIGN(4); + BSS_SECTION(0, PAGE_SIZE, 4) _ebss = .; /* uClinux MTD sucks */ _end = . 
; -- cgit v1.2.3-59-g8ed1b From 2c59b0b70b9d5d61c726f179724660c4c2423f31 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 22 Jul 2009 14:41:35 +0000 Subject: usb: m66592-udc platform data on_chip support Convert the m66592-udc driver to use the on_chip flag from platform data to enable on chip behaviour instead of relying on CONFIG_SUPERH_BUILT_IN_M66592 ugliness. This makes the code cleaner and also allows us to support both external and internal m66592 with the same kernel. It also makes the Kconfig part more future proof since we with this patch can add support for new processors with on-chip m66592 without modifying the Kconfig. The patch adds a m66592 header file for platform data and ties in platform data to the existing m66592 devices. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/mach-highlander/setup.c | 7 + arch/sh/boards/mach-x3proto/setup.c | 7 + arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 8 +- drivers/usb/gadget/Kconfig | 10 -- drivers/usb/gadget/m66592-udc.c | 252 +++++++++++++++++++-------------- drivers/usb/gadget/m66592-udc.h | 89 ++++++------ include/linux/usb/m66592.h | 44 ++++++ 7 files changed, 257 insertions(+), 160 deletions(-) create mode 100644 include/linux/usb/m66592.h (limited to 'arch/sh/kernel') diff --git a/arch/sh/boards/mach-highlander/setup.c b/arch/sh/boards/mach-highlander/setup.c index 1639f8915000..566e69d8d729 100644 --- a/arch/sh/boards/mach-highlander/setup.c +++ b/arch/sh/boards/mach-highlander/setup.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -60,6 +61,11 @@ static struct platform_device r8a66597_usb_host_device = { .resource = r8a66597_usb_host_resources, }; +static struct m66592_platdata usbf_platdata = { + .xtal = M66592_PLATDATA_XTAL_24MHZ, + .vif = 1, +}; + static struct resource m66592_usb_peripheral_resources[] = { [0] = { .name = "m66592_udc", @@ -81,6 +87,7 @@ static struct platform_device m66592_usb_peripheral_device = { .dev = { .dma_mask = NULL, /* don't use dma */ .coherent_dma_mask = 0xffffffff, + .platform_data = &usbf_platdata, }, .num_resources = ARRAY_SIZE(m66592_usb_peripheral_resources), .resource = m66592_usb_peripheral_resources, diff --git a/arch/sh/boards/mach-x3proto/setup.c b/arch/sh/boards/mach-x3proto/setup.c index 8913ae39a802..efe4cb9f8a77 100644 --- a/arch/sh/boards/mach-x3proto/setup.c +++ b/arch/sh/boards/mach-x3proto/setup.c @@ -17,6 +17,7 @@ #include #include #include +#include #include static struct resource heartbeat_resources[] = { @@ -89,6 +90,11 @@ static struct platform_device r8a66597_usb_host_device = { .resource = r8a66597_usb_host_resources, }; +static struct m66592_platdata usbf_platdata = { + .xtal = M66592_PLATDATA_XTAL_24MHZ, + .vif = 1, +}; + static struct resource m66592_usb_peripheral_resources[] = { [0] = { .name = "m66592_udc", @@ -109,6 +115,7 @@ static struct platform_device m66592_usb_peripheral_device = { .dev = { .dma_mask = NULL, /* don't use dma */ .coherent_dma_mask = 0xffffffff, + .platform_data = &usbf_platdata, }, .num_resources = ARRAY_SIZE(m66592_usb_peripheral_resources), .resource = m66592_usb_peripheral_resources, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index ea524a2da3e4..0bad14a44238 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -47,9 +48,13 @@ static struct platform_device rtc_device = { .resource = rtc_resources, }; 
+static struct m66592_platdata usbf_platdata = { + .on_chip = 1, +}; + static struct resource usbf_resources[] = { [0] = { - .name = "m66592_udc", + .name = "USBF", .start = 0x04480000, .end = 0x044800FF, .flags = IORESOURCE_MEM, @@ -67,6 +72,7 @@ static struct platform_device usbf_device = { .dev = { .dma_mask = NULL, .coherent_dma_mask = 0xffffffff, + .platform_data = &usbf_platdata, }, .num_resources = ARRAY_SIZE(usbf_resources), .resource = usbf_resources, diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 7f8e83a954ac..b7f10bc25c2c 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -360,16 +360,6 @@ config USB_M66592 default USB_GADGET select USB_GADGET_SELECTED -config SUPERH_BUILT_IN_M66592 - boolean "Enable SuperH built-in USB like the M66592" - depends on USB_GADGET_M66592 && CPU_SUBTYPE_SH7722 - help - SH7722 has USB like the M66592. - - The transfer rate is very slow when use "Ethernet Gadget". - However, this problem is improved if change a value of - NET_IP_ALIGN to 4. - # # Controllers available only in discrete form (and all PCI controllers) # diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c index 0dddd2f8ff35..a61c70caff12 100644 --- a/drivers/usb/gadget/m66592-udc.c +++ b/drivers/usb/gadget/m66592-udc.c @@ -31,38 +31,12 @@ #include "m66592-udc.h" - MODULE_DESCRIPTION("M66592 USB gadget driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Yoshihiro Shimoda"); MODULE_ALIAS("platform:m66592_udc"); -#define DRIVER_VERSION "26 Jun 2009" - -/* module parameters */ -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) -static unsigned short endian = M66592_LITTLE; -module_param(endian, ushort, 0644); -MODULE_PARM_DESC(endian, "data endian: big=0, little=0 (default=0)"); -#else -static unsigned short clock = M66592_XTAL24; -module_param(clock, ushort, 0644); -MODULE_PARM_DESC(clock, "input clock: 48MHz=32768, 24MHz=16384, 12MHz=0 " - "(default=16384)"); - -static unsigned short vif = M66592_LDRV; -module_param(vif, ushort, 0644); -MODULE_PARM_DESC(vif, "input VIF: 3.3V=32768, 1.5V=0 (default=32768)"); - -static unsigned short endian; -module_param(endian, ushort, 0644); -MODULE_PARM_DESC(endian, "data endian: big=256, little=0 (default=0)"); - -static unsigned short irq_sense = M66592_INTL; -module_param(irq_sense, ushort, 0644); -MODULE_PARM_DESC(irq_sense, "IRQ sense: low level=2, falling edge=0 " - "(default=2)"); -#endif +#define DRIVER_VERSION "21 July 2009" static const char udc_name[] = "m66592_udc"; static const char *m66592_ep_name[] = { @@ -244,6 +218,7 @@ static inline int get_buffer_size(struct m66592 *m66592, u16 pipenum) static inline void pipe_change(struct m66592 *m66592, u16 pipenum) { struct m66592_ep *ep = m66592->pipenum2ep[pipenum]; + unsigned short mbw; if (ep->use_dma) return; @@ -252,7 +227,12 @@ static inline void pipe_change(struct m66592 *m66592, u16 pipenum) ndelay(450); - m66592_bset(m66592, M66592_MBW, ep->fifosel); + if (m66592->pdata->on_chip) + mbw = M66592_MBW_32; + else + mbw = M66592_MBW_16; + + m66592_bset(m66592, mbw, ep->fifosel); } static int pipe_buffer_setting(struct m66592 *m66592, @@ -332,6 +312,7 @@ static void pipe_buffer_release(struct m66592 *m66592, static void pipe_initialize(struct m66592_ep *ep) { struct m66592 *m66592 = ep->m66592; + unsigned short mbw; m66592_mdfy(m66592, 0, M66592_CURPIPE, ep->fifosel); @@ -343,7 +324,12 @@ static void pipe_initialize(struct m66592_ep *ep) ndelay(450); - m66592_bset(m66592, M66592_MBW, ep->fifosel); + if (m66592->pdata->on_chip) + 
mbw = M66592_MBW_32; + else + mbw = M66592_MBW_16; + + m66592_bset(m66592, mbw, ep->fifosel); } } @@ -359,15 +345,13 @@ static void m66592_ep_setting(struct m66592 *m66592, struct m66592_ep *ep, ep->fifosel = M66592_D0FIFOSEL; ep->fifoctr = M66592_D0FIFOCTR; ep->fifotrn = M66592_D0FIFOTRN; -#if !defined(CONFIG_SUPERH_BUILT_IN_M66592) - } else if (m66592->num_dma == 1) { + } else if (!m66592->pdata->on_chip && m66592->num_dma == 1) { m66592->num_dma++; ep->use_dma = 1; ep->fifoaddr = M66592_D1FIFO; ep->fifosel = M66592_D1FIFOSEL; ep->fifoctr = M66592_D1FIFOCTR; ep->fifotrn = M66592_D1FIFOTRN; -#endif } else { ep->use_dma = 0; ep->fifoaddr = M66592_CFIFO; @@ -612,76 +596,120 @@ static void start_ep0(struct m66592_ep *ep, struct m66592_request *req) } } -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) static void init_controller(struct m66592 *m66592) { - m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */ - m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG); - m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG); - m66592_bset(m66592, M66592_USBE, M66592_SYSCFG); + unsigned int endian; - /* This is a workaound for SH7722 2nd cut */ - m66592_bset(m66592, 0x8000, M66592_DVSTCTR); - m66592_bset(m66592, 0x1000, M66592_TESTMODE); - m66592_bclr(m66592, 0x8000, M66592_DVSTCTR); + if (m66592->pdata->on_chip) { + if (m66592->pdata->endian) + endian = 0; /* big endian */ + else + endian = M66592_LITTLE; /* little endian */ - m66592_bset(m66592, M66592_INTL, M66592_INTENB1); + m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */ + m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG); + m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG); + m66592_bset(m66592, M66592_USBE, M66592_SYSCFG); - m66592_write(m66592, 0, M66592_CFBCFG); - m66592_write(m66592, 0, M66592_D0FBCFG); - m66592_bset(m66592, endian, M66592_CFBCFG); - m66592_bset(m66592, endian, M66592_D0FBCFG); -} -#else /* #if defined(CONFIG_SUPERH_BUILT_IN_M66592) */ -static void init_controller(struct m66592 *m66592) -{ - m66592_bset(m66592, (vif & M66592_LDRV) | (endian & M66592_BIGEND), - M66592_PINCFG); - m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */ - m66592_mdfy(m66592, clock & M66592_XTAL, M66592_XTAL, M66592_SYSCFG); + /* This is a workaound for SH7722 2nd cut */ + m66592_bset(m66592, 0x8000, M66592_DVSTCTR); + m66592_bset(m66592, 0x1000, M66592_TESTMODE); + m66592_bclr(m66592, 0x8000, M66592_DVSTCTR); - m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG); - m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG); - m66592_bset(m66592, M66592_USBE, M66592_SYSCFG); + m66592_bset(m66592, M66592_INTL, M66592_INTENB1); + + m66592_write(m66592, 0, M66592_CFBCFG); + m66592_write(m66592, 0, M66592_D0FBCFG); + m66592_bset(m66592, endian, M66592_CFBCFG); + m66592_bset(m66592, endian, M66592_D0FBCFG); + } else { + unsigned int clock, vif, irq_sense; + + if (m66592->pdata->endian) + endian = M66592_BIGEND; /* big endian */ + else + endian = 0; /* little endian */ + + if (m66592->pdata->vif) + vif = M66592_LDRV; /* 3.3v */ + else + vif = 0; /* 1.5v */ + + switch (m66592->pdata->xtal) { + case M66592_PLATDATA_XTAL_12MHZ: + clock = M66592_XTAL12; + break; + case M66592_PLATDATA_XTAL_24MHZ: + clock = M66592_XTAL24; + break; + case M66592_PLATDATA_XTAL_48MHZ: + clock = M66592_XTAL48; + break; + default: + pr_warning("m66592-udc: xtal configuration error\n"); + clock = 0; + } - m66592_bset(m66592, M66592_XCKE, M66592_SYSCFG); + switch (m66592->irq_trigger) { + case IRQF_TRIGGER_LOW: + irq_sense = M66592_INTL; + break; + case IRQF_TRIGGER_FALLING: + 
irq_sense = 0; + break; + default: + pr_warning("m66592-udc: irq trigger config error\n"); + irq_sense = 0; + } - msleep(3); + m66592_bset(m66592, + (vif & M66592_LDRV) | (endian & M66592_BIGEND), + M66592_PINCFG); + m66592_bset(m66592, M66592_HSE, M66592_SYSCFG); /* High spd */ + m66592_mdfy(m66592, clock & M66592_XTAL, M66592_XTAL, + M66592_SYSCFG); + m66592_bclr(m66592, M66592_USBE, M66592_SYSCFG); + m66592_bclr(m66592, M66592_DPRPU, M66592_SYSCFG); + m66592_bset(m66592, M66592_USBE, M66592_SYSCFG); - m66592_bset(m66592, M66592_RCKE | M66592_PLLC, M66592_SYSCFG); + m66592_bset(m66592, M66592_XCKE, M66592_SYSCFG); + + msleep(3); - msleep(1); + m66592_bset(m66592, M66592_RCKE | M66592_PLLC, M66592_SYSCFG); - m66592_bset(m66592, M66592_SCKE, M66592_SYSCFG); + msleep(1); - m66592_bset(m66592, irq_sense & M66592_INTL, M66592_INTENB1); - m66592_write(m66592, M66592_BURST | M66592_CPU_ADR_RD_WR, - M66592_DMA0CFG); + m66592_bset(m66592, M66592_SCKE, M66592_SYSCFG); + + m66592_bset(m66592, irq_sense & M66592_INTL, M66592_INTENB1); + m66592_write(m66592, M66592_BURST | M66592_CPU_ADR_RD_WR, + M66592_DMA0CFG); + } } -#endif /* #if defined(CONFIG_SUPERH_BUILT_IN_M66592) */ static void disable_controller(struct m66592 *m66592) { -#if !defined(CONFIG_SUPERH_BUILT_IN_M66592) - m66592_bclr(m66592, M66592_SCKE, M66592_SYSCFG); - udelay(1); - m66592_bclr(m66592, M66592_PLLC, M66592_SYSCFG); - udelay(1); - m66592_bclr(m66592, M66592_RCKE, M66592_SYSCFG); - udelay(1); - m66592_bclr(m66592, M66592_XCKE, M66592_SYSCFG); -#endif + if (!m66592->pdata->on_chip) { + m66592_bclr(m66592, M66592_SCKE, M66592_SYSCFG); + udelay(1); + m66592_bclr(m66592, M66592_PLLC, M66592_SYSCFG); + udelay(1); + m66592_bclr(m66592, M66592_RCKE, M66592_SYSCFG); + udelay(1); + m66592_bclr(m66592, M66592_XCKE, M66592_SYSCFG); + } } static void m66592_start_xclock(struct m66592 *m66592) { -#if !defined(CONFIG_SUPERH_BUILT_IN_M66592) u16 tmp; - tmp = m66592_read(m66592, M66592_SYSCFG); - if (!(tmp & M66592_XCKE)) - m66592_bset(m66592, M66592_XCKE, M66592_SYSCFG); -#endif + if (!m66592->pdata->on_chip) { + tmp = m66592_read(m66592, M66592_SYSCFG); + if (!(tmp & M66592_XCKE)) + m66592_bset(m66592, M66592_XCKE, M66592_SYSCFG); + } } /*-------------------------------------------------------------------------*/ @@ -1169,8 +1197,7 @@ static irqreturn_t m66592_irq(int irq, void *_m66592) intsts0 = m66592_read(m66592, M66592_INTSTS0); intenb0 = m66592_read(m66592, M66592_INTENB0); -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) - if (!intsts0 && !intenb0) { + if (m66592->pdata->on_chip && !intsts0 && !intenb0) { /* * When USB clock stops, it cannot read register. Even if a * clock stops, the interrupt occurs. 
So this driver turn on @@ -1180,7 +1207,6 @@ static irqreturn_t m66592_irq(int irq, void *_m66592) intsts0 = m66592_read(m66592, M66592_INTSTS0); intenb0 = m66592_read(m66592, M66592_INTENB0); } -#endif savepipe = m66592_read(m66592, M66592_CFIFOSEL); @@ -1526,9 +1552,11 @@ static int __exit m66592_remove(struct platform_device *pdev) iounmap(m66592->reg); free_irq(platform_get_irq(pdev, 0), m66592); m66592_free_request(&m66592->ep[0].ep, m66592->ep0_req); -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) - clk_disable(m66592->clk); - clk_put(m66592->clk); +#ifdef CONFIG_HAVE_CLK + if (m66592->pdata->on_chip) { + clk_disable(m66592->clk); + clk_put(m66592->clk); + } #endif kfree(m66592); return 0; @@ -1540,11 +1568,10 @@ static void nop_completion(struct usb_ep *ep, struct usb_request *r) static int __init m66592_probe(struct platform_device *pdev) { - struct resource *res; - int irq; + struct resource *res, *ires; void __iomem *reg = NULL; struct m66592 *m66592 = NULL; -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) +#ifdef CONFIG_HAVE_CLK char clk_name[8]; #endif int ret = 0; @@ -1557,10 +1584,11 @@ static int __init m66592_probe(struct platform_device *pdev) goto clean_up; } - irq = platform_get_irq(pdev, 0); - if (irq < 0) { + ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!ires) { ret = -ENODEV; - pr_err("platform_get_irq error.\n"); + dev_err(&pdev->dev, + "platform_get_resource IORESOURCE_IRQ error.\n"); goto clean_up; } @@ -1571,6 +1599,12 @@ static int __init m66592_probe(struct platform_device *pdev) goto clean_up; } + if (pdev->dev.platform_data == NULL) { + dev_err(&pdev->dev, "no platform data\n"); + ret = -ENODEV; + goto clean_up; + } + /* initialize ucd */ m66592 = kzalloc(sizeof(struct m66592), GFP_KERNEL); if (m66592 == NULL) { @@ -1578,6 +1612,9 @@ static int __init m66592_probe(struct platform_device *pdev) goto clean_up; } + m66592->pdata = pdev->dev.platform_data; + m66592->irq_trigger = ires->flags & IRQF_TRIGGER_MASK; + spin_lock_init(&m66592->lock); dev_set_drvdata(&pdev->dev, m66592); @@ -1595,22 +1632,25 @@ static int __init m66592_probe(struct platform_device *pdev) m66592->timer.data = (unsigned long)m66592; m66592->reg = reg; - ret = request_irq(irq, m66592_irq, IRQF_DISABLED | IRQF_SHARED, + ret = request_irq(ires->start, m66592_irq, IRQF_DISABLED | IRQF_SHARED, udc_name, m66592); if (ret < 0) { pr_err("request_irq error (%d)\n", ret); goto clean_up; } -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) - snprintf(clk_name, sizeof(clk_name), "usbf%d", pdev->id); - m66592->clk = clk_get(&pdev->dev, clk_name); - if (IS_ERR(m66592->clk)) { - dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); - ret = PTR_ERR(m66592->clk); - goto clean_up2; +#ifdef CONFIG_HAVE_CLK + if (m66592->pdata->on_chip) { + snprintf(clk_name, sizeof(clk_name), "usbf%d", pdev->id); + m66592->clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(m66592->clk)) { + dev_err(&pdev->dev, "cannot get clock \"%s\"\n", + clk_name); + ret = PTR_ERR(m66592->clk); + goto clean_up2; + } + clk_enable(m66592->clk); } - clk_enable(m66592->clk); #endif INIT_LIST_HEAD(&m66592->gadget.ep_list); m66592->gadget.ep0 = &m66592->ep[0].ep; @@ -1652,12 +1692,14 @@ static int __init m66592_probe(struct platform_device *pdev) return 0; clean_up3: -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) - clk_disable(m66592->clk); - clk_put(m66592->clk); +#ifdef CONFIG_HAVE_CLK + if (m66592->pdata->on_chip) { + 
clk_disable(m66592->clk); + clk_put(m66592->clk); + } clean_up2: #endif - free_irq(irq, m66592); + free_irq(ires->start, m66592); clean_up: if (m66592) { if (m66592->ep0_req) diff --git a/drivers/usb/gadget/m66592-udc.h b/drivers/usb/gadget/m66592-udc.h index 9a9c2bf9fbd5..8b960deed680 100644 --- a/drivers/usb/gadget/m66592-udc.h +++ b/drivers/usb/gadget/m66592-udc.h @@ -23,10 +23,12 @@ #ifndef __M66592_UDC_H__ #define __M66592_UDC_H__ -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) +#ifdef CONFIG_HAVE_CLK #include #endif +#include + #define M66592_SYSCFG 0x00 #define M66592_XTAL 0xC000 /* b15-14: Crystal selection */ #define M66592_XTAL48 0x8000 /* 48MHz */ @@ -76,11 +78,11 @@ #define M66592_P_TST_J 0x0001 /* PERI TEST J */ #define M66592_P_TST_NORMAL 0x0000 /* PERI Normal Mode */ -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) +/* built-in registers */ #define M66592_CFBCFG 0x0A #define M66592_D0FBCFG 0x0C #define M66592_LITTLE 0x0100 /* b8: Little endian mode */ -#else +/* external chip case */ #define M66592_PINCFG 0x0A #define M66592_LDRV 0x8000 /* b15: Drive Current Adjust */ #define M66592_BIGEND 0x0100 /* b8: Big endian mode */ @@ -100,8 +102,8 @@ #define M66592_PKTM 0x0020 /* b5: Packet mode */ #define M66592_DENDE 0x0010 /* b4: Dend enable */ #define M66592_OBUS 0x0004 /* b2: OUTbus mode */ -#endif /* #if defined(CONFIG_SUPERH_BUILT_IN_M66592) */ +/* common case */ #define M66592_CFIFO 0x10 #define M66592_D0FIFO 0x14 #define M66592_D1FIFO 0x18 @@ -113,13 +115,9 @@ #define M66592_REW 0x4000 /* b14: Buffer rewind */ #define M66592_DCLRM 0x2000 /* b13: DMA buffer clear mode */ #define M66592_DREQE 0x1000 /* b12: DREQ output enable */ -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) -#define M66592_MBW 0x0800 /* b11: Maximum bit width for FIFO */ -#else -#define M66592_MBW 0x0400 /* b10: Maximum bit width for FIFO */ -#define M66592_MBW_8 0x0000 /* 8bit */ -#define M66592_MBW_16 0x0400 /* 16bit */ -#endif /* #if defined(CONFIG_SUPERH_BUILT_IN_M66592) */ +#define M66592_MBW_8 0x0000 /* 8bit */ +#define M66592_MBW_16 0x0400 /* 16bit */ +#define M66592_MBW_32 0x0800 /* 32bit */ #define M66592_TRENB 0x0200 /* b9: Transaction counter enable */ #define M66592_TRCLR 0x0100 /* b8: Transaction counter clear */ #define M66592_DEZPM 0x0080 /* b7: Zero-length packet mode */ @@ -480,9 +478,11 @@ struct m66592_ep { struct m66592 { spinlock_t lock; void __iomem *reg; -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) && defined(CONFIG_HAVE_CLK) +#ifdef CONFIG_HAVE_CLK struct clk *clk; #endif + struct m66592_platdata *pdata; + unsigned long irq_trigger; struct usb_gadget gadget; struct usb_gadget_driver *driver; @@ -546,13 +546,13 @@ static inline void m66592_read_fifo(struct m66592 *m66592, { unsigned long fifoaddr = (unsigned long)m66592->reg + offset; -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) - len = (len + 3) / 4; - insl(fifoaddr, buf, len); -#else - len = (len + 1) / 2; - insw(fifoaddr, buf, len); -#endif + if (m66592->pdata->on_chip) { + len = (len + 3) / 4; + insl(fifoaddr, buf, len); + } else { + len = (len + 1) / 2; + insw(fifoaddr, buf, len); + } } static inline void m66592_write(struct m66592 *m66592, u16 val, @@ -566,33 +566,34 @@ static inline void m66592_write_fifo(struct m66592 *m66592, void *buf, unsigned long len) { unsigned long fifoaddr = (unsigned long)m66592->reg + offset; -#if defined(CONFIG_SUPERH_BUILT_IN_M66592) - unsigned long count; - unsigned char *pb; - int i; - - count = len / 4; - outsl(fifoaddr, buf, count); - - if (len & 0x00000003) { - pb = buf + count * 
4; - for (i = 0; i < (len & 0x00000003); i++) { - if (m66592_read(m66592, M66592_CFBCFG)) /* little */ - outb(pb[i], fifoaddr + (3 - i)); - else - outb(pb[i], fifoaddr + i); + + if (m66592->pdata->on_chip) { + unsigned long count; + unsigned char *pb; + int i; + + count = len / 4; + outsl(fifoaddr, buf, count); + + if (len & 0x00000003) { + pb = buf + count * 4; + for (i = 0; i < (len & 0x00000003); i++) { + if (m66592_read(m66592, M66592_CFBCFG)) /* le */ + outb(pb[i], fifoaddr + (3 - i)); + else + outb(pb[i], fifoaddr + i); + } + } + } else { + unsigned long odd = len & 0x0001; + + len = len / 2; + outsw(fifoaddr, buf, len); + if (odd) { + unsigned char *p = buf + len*2; + outb(*p, fifoaddr); } } -#else - unsigned long odd = len & 0x0001; - - len = len / 2; - outsw(fifoaddr, buf, len); - if (odd) { - unsigned char *p = buf + len*2; - outb(*p, fifoaddr); - } -#endif /* #if defined(CONFIG_SUPERH_BUILT_IN_M66592) */ } static inline void m66592_mdfy(struct m66592 *m66592, u16 val, u16 pat, diff --git a/include/linux/usb/m66592.h b/include/linux/usb/m66592.h new file mode 100644 index 000000000000..cda9625e7df0 --- /dev/null +++ b/include/linux/usb/m66592.h @@ -0,0 +1,44 @@ +/* + * M66592 driver platform data + * + * Copyright (C) 2009 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#ifndef __LINUX_USB_M66592_H +#define __LINUX_USB_M66592_H + +#define M66592_PLATDATA_XTAL_12MHZ 0x01 +#define M66592_PLATDATA_XTAL_24MHZ 0x02 +#define M66592_PLATDATA_XTAL_48MHZ 0x03 + +struct m66592_platdata { + /* one = on chip controller, zero = external controller */ + unsigned on_chip:1; + + /* one = big endian, zero = little endian */ + unsigned endian:1; + + /* (external controller only) M66592_PLATDATA_XTAL_nnMHZ */ + unsigned xtal:2; + + /* (external controller only) one = 3.3V, zero = 1.5V */ + unsigned vif:1; + +}; + +#endif /* __LINUX_USB_M66592_H */ + -- cgit v1.2.3-59-g8ed1b From 955c9863bb5855a994751843e7066017edc00410 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 22 Jul 2009 15:14:29 +0000 Subject: sh: convert processor device setup functions to arch_initcall() Convert the processor platform device setup functions from __initcall() and sometimes device_initcall() to arch_initcall(). This makes sure that the platform devices are registered a bit earlier so the devices are available when drivers register using initcall levels earlier than device_initcall(). A good example is platform devices needed by i2c-sh_mobile.c which registers a bit earlier using subsys_initcall(). 
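As a rough illustration of the ordering this relies on: initcall levels run in the order arch_initcall() -> subsys_initcall() -> fs_initcall() -> device_initcall(), and __initcall() is simply an alias for device_initcall(). A converted setup function therefore follows the sketch below; "example_devices" is a placeholder array, not code from this series.

/* Sketch only: "example_devices" stands in for a SoC's platform device list. */
static int __init example_devices_setup(void)
{
	return platform_add_devices(example_devices,
				    ARRAY_SIZE(example_devices));
}
/*
 * arch_initcall() runs before subsys_initcall(), so the devices already
 * exist when a subsys_initcall() driver such as i2c-sh_mobile probes;
 * the old __initcall()/device_initcall() level ran after that point.
 */
arch_initcall(example_devices_setup);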
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh2/setup-sh7619.c | 2 +- arch/sh/kernel/cpu/sh2a/setup-mxg.c | 2 +- arch/sh/kernel/cpu/sh2a/setup-sh7201.c | 2 +- arch/sh/kernel/cpu/sh2a/setup-sh7203.c | 2 +- arch/sh/kernel/cpu/sh2a/setup-sh7206.c | 2 +- arch/sh/kernel/cpu/sh3/setup-sh7705.c | 2 +- arch/sh/kernel/cpu/sh3/setup-sh770x.c | 2 +- arch/sh/kernel/cpu/sh3/setup-sh7710.c | 2 +- arch/sh/kernel/cpu/sh3/setup-sh7720.c | 2 +- arch/sh/kernel/cpu/sh4/setup-sh4-202.c | 2 +- arch/sh/kernel/cpu/sh4/setup-sh7750.c | 2 +- arch/sh/kernel/cpu/sh4/setup-sh7760.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7343.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7366.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7723.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7724.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7763.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7770.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7780.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7785.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-sh7786.c | 2 +- arch/sh/kernel/cpu/sh4a/setup-shx3.c | 2 +- arch/sh/kernel/cpu/sh5/setup-sh5.c | 2 +- 24 files changed, 24 insertions(+), 24 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c index 13798733f2db..8555c05e8667 100644 --- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c @@ -187,7 +187,7 @@ static int __init sh7619_devices_setup(void) return platform_add_devices(sh7619_devices, ARRAY_SIZE(sh7619_devices)); } -__initcall(sh7619_devices_setup); +arch_initcall(sh7619_devices_setup); void __init plat_irq_setup(void) { diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c index 869c2da4820b..b67376445315 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-mxg.c +++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c @@ -238,7 +238,7 @@ static int __init mxg_devices_setup(void) return platform_add_devices(mxg_devices, ARRAY_SIZE(mxg_devices)); } -__initcall(mxg_devices_setup); +arch_initcall(mxg_devices_setup); void __init plat_irq_setup(void) { diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c index d8febe128066..fbde5b75deb9 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c @@ -357,7 +357,7 @@ static int __init sh7201_devices_setup(void) return platform_add_devices(sh7201_devices, ARRAY_SIZE(sh7201_devices)); } -__initcall(sh7201_devices_setup); +arch_initcall(sh7201_devices_setup); void __init plat_irq_setup(void) { diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c index 62e3039d2398..d3fd536c9a84 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c @@ -367,7 +367,7 @@ static int __init sh7203_devices_setup(void) return platform_add_devices(sh7203_devices, ARRAY_SIZE(sh7203_devices)); } -__initcall(sh7203_devices_setup); +arch_initcall(sh7203_devices_setup); void __init plat_irq_setup(void) { diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c index 3e6f3d7a58be..a9ccc5e8d9e9 100644 --- a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c @@ -338,7 +338,7 @@ static int __init sh7206_devices_setup(void) return platform_add_devices(sh7206_devices, ARRAY_SIZE(sh7206_devices)); } -__initcall(sh7206_devices_setup); +arch_initcall(sh7206_devices_setup); void __init plat_irq_setup(void) { 
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c index 88f742fed9ed..c23105983878 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7705.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c @@ -222,7 +222,7 @@ static int __init sh7705_devices_setup(void) return platform_add_devices(sh7705_devices, ARRAY_SIZE(sh7705_devices)); } -__initcall(sh7705_devices_setup); +arch_initcall(sh7705_devices_setup); static struct platform_device *sh7705_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c index c56306798584..347ab35d0697 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c @@ -250,7 +250,7 @@ static int __init sh770x_devices_setup(void) return platform_add_devices(sh770x_devices, ARRAY_SIZE(sh770x_devices)); } -__initcall(sh770x_devices_setup); +arch_initcall(sh770x_devices_setup); static struct platform_device *sh770x_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c index efa76c8148f4..717e90ae1097 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7710.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c @@ -226,7 +226,7 @@ static int __init sh7710_devices_setup(void) return platform_add_devices(sh7710_devices, ARRAY_SIZE(sh7710_devices)); } -__initcall(sh7710_devices_setup); +arch_initcall(sh7710_devices_setup); static struct platform_device *sh7710_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c index 5b2107798edb..74d8baaf8e96 100644 --- a/arch/sh/kernel/cpu/sh3/setup-sh7720.c +++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c @@ -388,7 +388,7 @@ static int __init sh7720_devices_setup(void) return platform_add_devices(sh7720_devices, ARRAY_SIZE(sh7720_devices)); } -__initcall(sh7720_devices_setup); +arch_initcall(sh7720_devices_setup); static struct platform_device *sh7720_early_devices[] __initdata = { &cmt0_device, diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c index 6d088d123591..de4827df19aa 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c @@ -138,7 +138,7 @@ static int __init sh4202_devices_setup(void) return platform_add_devices(sh4202_devices, ARRAY_SIZE(sh4202_devices)); } -__initcall(sh4202_devices_setup); +arch_initcall(sh4202_devices_setup); static struct platform_device *sh4202_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index 851672d15cf4..1b8b122e8f3d 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c @@ -239,7 +239,7 @@ static int __init sh7750_devices_setup(void) return platform_add_devices(sh7750_devices, ARRAY_SIZE(sh7750_devices)); } -__initcall(sh7750_devices_setup); +arch_initcall(sh7750_devices_setup); static struct platform_device *sh7750_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c index 5b822519bd90..7fbb7be9284c 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7760.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c @@ -265,7 +265,7 @@ static int __init sh7760_devices_setup(void) return platform_add_devices(sh7760_devices, ARRAY_SIZE(sh7760_devices)); } -__initcall(sh7760_devices_setup); +arch_initcall(sh7760_devices_setup); static struct 
platform_device *sh7760_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c index 6307e087c864..ac4d5672ec1a 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c @@ -325,7 +325,7 @@ static int __init sh7343_devices_setup(void) return platform_add_devices(sh7343_devices, ARRAY_SIZE(sh7343_devices)); } -__initcall(sh7343_devices_setup); +arch_initcall(sh7343_devices_setup); static struct platform_device *sh7343_early_devices[] __initdata = { &cmt_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c index f6d208813564..4a9010bf4fd3 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c @@ -318,7 +318,7 @@ static int __init sh7366_devices_setup(void) return platform_add_devices(sh7366_devices, ARRAY_SIZE(sh7366_devices)); } -__initcall(sh7366_devices_setup); +arch_initcall(sh7366_devices_setup); static struct platform_device *sh7366_early_devices[] __initdata = { &cmt_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index 0bad14a44238..67b0d87fcb27 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -365,7 +365,7 @@ static int __init sh7722_devices_setup(void) return platform_add_devices(sh7722_devices, ARRAY_SIZE(sh7722_devices)); } -__initcall(sh7722_devices_setup); +arch_initcall(sh7722_devices_setup); static struct platform_device *sh7722_early_devices[] __initdata = { &cmt_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index 28516499a2c4..26dc4d323252 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c @@ -473,7 +473,7 @@ static int __init sh7723_devices_setup(void) return platform_add_devices(sh7723_devices, ARRAY_SIZE(sh7723_devices)); } -__initcall(sh7723_devices_setup); +arch_initcall(sh7723_devices_setup); static struct platform_device *sh7723_early_devices[] __initdata = { &cmt_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c index e5ac9eb11c63..a04edaab9a29 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c @@ -508,7 +508,7 @@ static int __init sh7724_devices_setup(void) return platform_add_devices(sh7724_devices, ARRAY_SIZE(sh7724_devices)); } -device_initcall(sh7724_devices_setup); +arch_initcall(sh7724_devices_setup); static struct platform_device *sh7724_early_devices[] __initdata = { &cmt_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c index f1e0c0d36da7..4659fff6b842 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c @@ -314,7 +314,7 @@ static int __init sh7763_devices_setup(void) return platform_add_devices(sh7763_devices, ARRAY_SIZE(sh7763_devices)); } -__initcall(sh7763_devices_setup); +arch_initcall(sh7763_devices_setup); static struct platform_device *sh7763_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c index 1e86209db284..eead08d89d32 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c @@ -368,7 +368,7 @@ static int __init sh7770_devices_setup(void) return platform_add_devices(sh7770_devices, ARRAY_SIZE(sh7770_devices)); } 
-__initcall(sh7770_devices_setup); +arch_initcall(sh7770_devices_setup); static struct platform_device *sh7770_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c index 715e05b431e5..2c901f446959 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c @@ -256,7 +256,7 @@ static int __init sh7780_devices_setup(void) return platform_add_devices(sh7780_devices, ARRAY_SIZE(sh7780_devices)); } -__initcall(sh7780_devices_setup); +arch_initcall(sh7780_devices_setup); static struct platform_device *sh7780_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c index af561402570b..7f6c718b6c36 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c @@ -263,7 +263,7 @@ static int __init sh7785_devices_setup(void) return platform_add_devices(sh7785_devices, ARRAY_SIZE(sh7785_devices)); } -__initcall(sh7785_devices_setup); +arch_initcall(sh7785_devices_setup); static struct platform_device *sh7785_early_devices[] __initdata = { &tmu0_device, diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c index b70049470a0b..0104a8ec5369 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c @@ -547,7 +547,7 @@ static int __init sh7786_devices_setup(void) return platform_add_devices(sh7786_devices, ARRAY_SIZE(sh7786_devices)); } -device_initcall(sh7786_devices_setup); +arch_initcall(sh7786_devices_setup); void __init plat_early_device_setup(void) { diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c index 53c65fd9ccef..07f078961c71 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c @@ -256,7 +256,7 @@ static int __init shx3_devices_setup(void) return platform_add_devices(shx3_devices, ARRAY_SIZE(shx3_devices)); } -__initcall(shx3_devices_setup); +arch_initcall(shx3_devices_setup); void __init plat_early_device_setup(void) { diff --git a/arch/sh/kernel/cpu/sh5/setup-sh5.c b/arch/sh/kernel/cpu/sh5/setup-sh5.c index f5ff1ac57fc2..6a0f82f70032 100644 --- a/arch/sh/kernel/cpu/sh5/setup-sh5.c +++ b/arch/sh/kernel/cpu/sh5/setup-sh5.c @@ -186,7 +186,7 @@ static int __init sh5_devices_setup(void) return platform_add_devices(sh5_devices, ARRAY_SIZE(sh5_devices)); } -__initcall(sh5_devices_setup); +arch_initcall(sh5_devices_setup); void __init plat_early_device_setup(void) { -- cgit v1.2.3-59-g8ed1b From dfff0fa65ab15db45acd64b3189787d37ab163cd Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 27 Jul 2009 20:53:22 +0900 Subject: sh: wire up clear_user_highpage() for sh4, convert sh7705. This wires up clear_user_highpage() on SH-4 and subsequently converts the SH7705 32kB cache mode over to using it. Now that the SH-4 implementation handles all of the dcache purging directly in the aliasing case, there is no need to do this in the default clear_page() implementation. 
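For context, the generic fallback that this override replaces lives in include/linux/highmem.h and is only compiled when the architecture does not provide its own clear_user_highpage; the "#define clear_user_highpage clear_user_highpage" added below hooks exactly that guard. From memory of this kernel generation, the generic version is approximately the following sketch, which has no way of knowing about SH-4 dcache aliasing:

/* Approximate sketch of the generic include/linux/highmem.h fallback. */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}
#endif

The SH-4 version in pg-sh4.c does the same kmap_atomic() dance but adds the __flush_wback_region() writeback when the kernel mapping aliases the user address, which is why the plain memset()-based clear_page() is now sufficient everywhere else.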
Signed-off-by: Paul Mundt --- arch/sh/include/asm/cacheflush.h | 2 +- arch/sh/include/asm/page.h | 12 +++-- arch/sh/include/asm/pgtable.h | 3 +- arch/sh/kernel/sh_ksyms_32.c | 6 --- arch/sh/lib/clear_page.S | 46 ------------------ arch/sh/mm/Makefile_32 | 2 +- arch/sh/mm/pg-nommu.c | 7 +-- arch/sh/mm/pg-sh4.c | 29 ++++++----- arch/sh/mm/pg-sh7705.c | 102 --------------------------------------- 9 files changed, 27 insertions(+), 182 deletions(-) delete mode 100644 arch/sh/mm/pg-sh7705.c (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h index 4c5462daa74c..4e360114269d 100644 --- a/arch/sh/include/asm/cacheflush.h +++ b/arch/sh/include/asm/cacheflush.h @@ -49,7 +49,7 @@ static inline void flush_kernel_dcache_page(struct page *page) flush_dcache_page(page); } -#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_CACHE_OFF) +#if (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)) && !defined(CONFIG_CACHE_OFF) extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len); diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index a31ab40040f0..5208b7bfc24e 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -56,21 +56,25 @@ pages_do_alias(unsigned long addr1, unsigned long addr2) return (addr1 ^ addr2) & shm_align_mask; } -extern void clear_page(void *to); + +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, void *from); +struct page; +struct vm_area_struct; + #if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \ (defined(CONFIG_CPU_SH5) || defined(CONFIG_CPU_SH4) || \ defined(CONFIG_SH7705_CACHE_32KB)) -struct page; -struct vm_area_struct; extern void clear_user_page(void *to, unsigned long address, struct page *page); extern void copy_user_page(void *to, void *from, unsigned long address, struct page *page); -#if defined(CONFIG_CPU_SH4) +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) extern void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma); #define __HAVE_ARCH_COPY_USER_HIGHPAGE +extern void clear_user_highpage(struct page *page, unsigned long vaddr); +#define clear_user_highpage clear_user_highpage #endif #else #define clear_user_page(page, vaddr, pg) clear_page(page) diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index d9f68f9c3cb3..bef3ab7fc09e 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -141,7 +141,8 @@ extern void paging_init(void); extern void page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd); -#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_CPU_SH4) && defined(CONFIG_MMU) +#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \ + defined(CONFIG_SH7705_CACHE_32KB)) && defined(CONFIG_MMU) extern void kmap_coherent_init(void); #else #define kmap_coherent_init() do { } while (0) diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index cec610888e28..8dbe26b17c44 100644 --- a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c @@ -101,11 +101,6 @@ EXPORT_SYMBOL(flush_cache_range); EXPORT_SYMBOL(flush_dcache_page); #endif -#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \ - (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)) -EXPORT_SYMBOL(clear_user_page); -#endif - #ifdef CONFIG_MCOUNT DECLARE_EXPORT(mcount); #endif @@ -114,7 
+109,6 @@ EXPORT_SYMBOL(csum_partial_copy_generic); #ifdef CONFIG_IPV6 EXPORT_SYMBOL(csum_ipv6_magic); #endif -EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(_ebss); diff --git a/arch/sh/lib/clear_page.S b/arch/sh/lib/clear_page.S index 8342bfbde64c..bee9817e055d 100644 --- a/arch/sh/lib/clear_page.S +++ b/arch/sh/lib/clear_page.S @@ -8,52 +8,6 @@ #include #include -/* - * clear_page - * @to: P1 address - * - * void clear_page(void *to) - */ - -/* - * r0 --- scratch - * r4 --- to - * r5 --- to + PAGE_SIZE - */ -ENTRY(clear_page) - mov r4,r5 - mov.l .Llimit,r0 - add r0,r5 - mov #0,r0 - ! -1: -#if defined(CONFIG_CPU_SH4) - movca.l r0,@r4 - mov r4,r1 -#else - mov.l r0,@r4 -#endif - add #32,r4 - mov.l r0,@-r4 - mov.l r0,@-r4 - mov.l r0,@-r4 - mov.l r0,@-r4 - mov.l r0,@-r4 - mov.l r0,@-r4 - mov.l r0,@-r4 -#if defined(CONFIG_CPU_SH4) - ocbwb @r1 -#endif - cmp/eq r5,r4 - bf/s 1b - add #28,r4 - ! - rts - nop - - .balign 4 -.Llimit: .long (PAGE_SIZE-28) - ENTRY(__clear_user) ! mov #0, r0 diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32 index 986a1e055834..5c04bbb08d36 100644 --- a/arch/sh/mm/Makefile_32 +++ b/arch/sh/mm/Makefile_32 @@ -31,7 +31,7 @@ tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o obj-y += $(tlb-y) ifndef CONFIG_CACHE_OFF obj-$(CONFIG_CPU_SH4) += pg-sh4.o -obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh7705.o +obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh4.o endif endif diff --git a/arch/sh/mm/pg-nommu.c b/arch/sh/mm/pg-nommu.c index 91ed4e695ff7..7e33b486b7e9 100644 --- a/arch/sh/mm/pg-nommu.c +++ b/arch/sh/mm/pg-nommu.c @@ -1,7 +1,7 @@ /* * arch/sh/mm/pg-nommu.c * - * clear_page()/copy_page() implementation for MMUless SH. + * copy_page()/__copy_user()/__clear_user() implementations for MMUless SH. * * Copyright (C) 2003 Paul Mundt * @@ -20,11 +20,6 @@ void copy_page(void *to, void *from) memcpy(to, from, PAGE_SIZE); } -void clear_page(void *to) -{ - memset(to, 0, PAGE_SIZE); -} - __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n) { memcpy(to, from, n); diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c index f3c4b2a54fc7..4d93070b8220 100644 --- a/arch/sh/mm/pg-sh4.c +++ b/arch/sh/mm/pg-sh4.c @@ -2,7 +2,7 @@ * arch/sh/mm/pg-sh4.c * * Copyright (C) 1999, 2000, 2002 Niibe Yutaka - * Copyright (C) 2002 - 2007 Paul Mundt + * Copyright (C) 2002 - 2009 Paul Mundt * * Released under the terms of the GNU GPL v2.0. 
*/ @@ -58,20 +58,6 @@ static inline void kunmap_coherent(struct page *page) preempt_check_resched(); } -/* - * clear_user_page - * @to: P1 address - * @address: U0 address to be mapped - * @page: page (virt_to_page(to)) - */ -void clear_user_page(void *to, unsigned long address, struct page *page) -{ - clear_page(to); - - if (pages_do_alias((unsigned long)to, address & PAGE_MASK)) - __flush_wback_region(to, PAGE_SIZE); -} - void copy_to_user_page(struct vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len) @@ -128,3 +114,16 @@ void copy_user_highpage(struct page *to, struct page *from, smp_wmb(); } EXPORT_SYMBOL(copy_user_highpage); + +void clear_user_highpage(struct page *page, unsigned long vaddr) +{ + void *kaddr = kmap_atomic(page, KM_USER0); + + clear_page(kaddr); + + if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) + __flush_wback_region(kaddr, PAGE_SIZE); + + kunmap_atomic(kaddr, KM_USER0); +} +EXPORT_SYMBOL(clear_user_highpage); diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c deleted file mode 100644 index 684891b5c8c0..000000000000 --- a/arch/sh/mm/pg-sh7705.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * arch/sh/mm/pg-sh7705.c - * - * Copyright (C) 1999, 2000 Niibe Yutaka - * Copyright (C) 2004 Alex Song - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static void __flush_purge_virtual_region(void *p1, void *virt, int size) -{ - unsigned long v; - unsigned long begin, end; - unsigned long p1_begin; - - - begin = L1_CACHE_ALIGN((unsigned long)virt); - end = L1_CACHE_ALIGN((unsigned long)virt + size); - - p1_begin = (unsigned long)p1 & ~(L1_CACHE_BYTES - 1); - - /* do this the slow way as we may not have TLB entries - * for virt yet. 
*/ - for (v = begin; v < end; v += L1_CACHE_BYTES) { - unsigned long p; - unsigned long ways, addr; - - p = __pa(p1_begin); - - ways = current_cpu_data.dcache.ways; - addr = CACHE_OC_ADDRESS_ARRAY; - - do { - unsigned long data; - - addr |= (v & current_cpu_data.dcache.entry_mask); - - data = ctrl_inl(addr); - if ((data & CACHE_PHYSADDR_MASK) == - (p & CACHE_PHYSADDR_MASK)) { - data &= ~(SH_CACHE_UPDATED|SH_CACHE_VALID); - ctrl_outl(data, addr); - } - - addr += current_cpu_data.dcache.way_incr; - } while (--ways); - - p1_begin += L1_CACHE_BYTES; - } -} - -/* - * clear_user_page - * @to: P1 address - * @address: U0 address to be mapped - */ -void clear_user_page(void *to, unsigned long address, struct page *pg) -{ - if (pages_do_alias(address, (unsigned long)to)) - __flush_purge_virtual_region(to, - (void *)(address & 0xfffff000), - PAGE_SIZE); - - clear_page(to); - __flush_wback_region(to, PAGE_SIZE); -} - -/* - * copy_user_page - * @to: P1 address - * @from: P1 address - * @address: U0 address to be mapped - */ -void copy_user_page(void *to, void *from, unsigned long address, struct page *pg) -{ - if (pages_do_alias(address, (unsigned long)to)) - __flush_purge_virtual_region(to, - (void *)(address & 0xfffff000), - PAGE_SIZE); - - copy_page(to, from); - __flush_wback_region(to, PAGE_SIZE); -} -- cgit v1.2.3-59-g8ed1b From 82b242214b6f5b96eb9b76452ac6e2b67dd81abd Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 29 Jul 2009 22:43:58 +0900 Subject: Revert "sh: Bump the earlytimer bits back to time_init()." This reverts commit 1d29ebebcb951ab6b04d22807cafb24b893310a2. Bumping up the earlytimer initialization causes IRQs to be enabled too early, which blows up lockdep: ... NR_IRQS:256 nr_irqs:256 ------------[ cut here ]------------ Badness at kernel/lockdep.c:2128 Pid : 0, Comm: swapper CPU : 0 Not tainted (2.6.31-rc3-00205-g3ed6e12-dirty #2443) PC is at trace_hardirqs_on_caller+0x48/0x10c PR is at trace_hardirqs_on_caller+0x3c/0x10c ... Revert it back to late_time_init time, which fixes up lockdep. Signed-off-by: Paul Mundt --- arch/sh/kernel/time.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c index d2424b068b7b..7f95f479060f 100644 --- a/arch/sh/kernel/time.c +++ b/arch/sh/kernel/time.c @@ -92,6 +92,21 @@ module_init(rtc_generic_init); void (*board_time_init)(void); +static void __init sh_late_time_init(void) +{ + /* + * Make sure all compiled-in early timers register themselves. + * + * Run probe() for two "earlytimer" devices, these will be the + * clockevents and clocksource devices respectively. In the event + * that only a clockevents device is available, we -ENODEV on the + * clocksource and the jiffies clocksource is used transparently + * instead. No error handling is necessary here. + */ + early_platform_driver_register_all("earlytimer"); + early_platform_driver_probe("earlytimer", 2, 0); +} + void __init time_init(void) { if (board_time_init) @@ -108,15 +123,5 @@ void __init time_init(void) local_timer_setup(smp_processor_id()); #endif - /* - * Make sure all compiled-in early timers register themselves. - * - * Run probe() for two "earlytimer" devices, these will be the - * clockevents and clocksource devices respectively. In the event - * that only a clockevents device is available, we -ENODEV on the - * clocksource and the jiffies clocksource is used transparently - * instead. No error handling is necessary here. 
- */ - early_platform_driver_register_all("earlytimer"); - early_platform_driver_probe("earlytimer", 2, 0); + late_time_init = sh_late_time_init; } -- cgit v1.2.3-59-g8ed1b From fd78a76aefb5bf28a11d6960d29e03a11db62320 Mon Sep 17 00:00:00 2001 From: Stuart Menefy Date: Wed, 29 Jul 2009 23:01:24 +0900 Subject: sh: Rework irqflags tracing to fix up CONFIG_PROVE_LOCKING. This cleans up the irqflags tracing code quite a bit and ties it in to various missing callsites that caused an imbalance when CONFIG_PROVE_LOCKING was enabled. Previously this was catching on: 987 #ifdef CONFIG_PROVE_LOCKING 988 DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); 989 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); 990 #endif 991 retval = -EAGAIN; with hardirqs being doubly enabled, and subsequently bailing out with the following call trace: Call trace: [<88035224>] __lock_acquire+0x616/0x6a6 [<88015a8c>] do_fork+0xf8/0x2b0 [<880331ec>] trace_hardirqs_on_caller+0xd4/0x114 [<88241074>] _spin_unlock_irq+0x20/0x64 [<88035224>] __lock_acquire+0x616/0x6a6 [<8800386c>] kernel_thread+0x48/0x70 [<88024ecc>] ____call_usermodehelper+0x0/0x110 [<88024ecc>] ____call_usermodehelper+0x0/0x110 [<88003894>] kernel_thread_helper+0x0/0x14 [<88024bac>] __call_usermodehelper+0x38/0x70 [<88025dc0>] worker_thread+0x150/0x274 [<88035b9c>] lock_release+0x0/0x198 [<88024b74>] __call_usermodehelper+0x0/0x70 [<88028cf0>] autoremove_wake_function+0x0/0x30 [<88028bf2>] kthread+0x3e/0x70 [<88025c70>] worker_thread+0x0/0x274 [<8800389c>] kernel_thread_helper+0x8/0x14 [<88028bb4>] kthread+0x0/0x70 [<88003894>] kernel_thread_helper+0x0/0x14 Reported-by: Nobuhiro Iwamatsu Signed-off-by: Stuart Menefy Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/Kconfig.debug | 2 +- arch/sh/include/asm/entry-macros.S | 72 ++++++++++++++++++++++++++++++++++++++ arch/sh/kernel/entry-common.S | 63 +++++++++------------------------ arch/sh/kernel/io_trapped.c | 7 ++-- drivers/serial/sh-sci.c | 5 +-- 5 files changed, 96 insertions(+), 53 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index b440fd936714..a6dce41296e7 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -38,7 +38,7 @@ config EARLY_SCIF_CONSOLE_PORT default "0xffe00000" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7763 || \ CPU_SUBTYPE_SH7722 || CPU_SUBTYPE_SH7366 || \ CPU_SUBTYPE_SH7343 - default "0xffea0000" if CPU_SUBTYPE_SH7785 + default "0xffeb0000" if CPU_SUBTYPE_SH7785 default "0xffeb0000" if CPU_SUBTYPE_SH7786 default "0xfffe8000" if CPU_SUBTYPE_SH7203 default "0xfffe9800" if CPU_SUBTYPE_SH7206 || CPU_SUBTYPE_SH7263 diff --git a/arch/sh/include/asm/entry-macros.S b/arch/sh/include/asm/entry-macros.S index 3a4752a65722..1bdd93891cd7 100644 --- a/arch/sh/include/asm/entry-macros.S +++ b/arch/sh/include/asm/entry-macros.S @@ -31,6 +31,78 @@ #endif .endm +#ifdef CONFIG_TRACE_IRQFLAGS + + .macro TRACE_IRQS_ON + mov.l r0, @-r15 + mov.l r1, @-r15 + mov.l r2, @-r15 + mov.l r3, @-r15 + mov.l r4, @-r15 + mov.l r5, @-r15 + mov.l r6, @-r15 + mov.l r7, @-r15 + + mov.l 7834f, r0 + jsr @r0 + nop + + mov.l @r15+, r7 + mov.l @r15+, r6 + mov.l @r15+, r5 + mov.l @r15+, r4 + mov.l @r15+, r3 + mov.l @r15+, r2 + mov.l @r15+, r1 + mov.l @r15+, r0 + mov.l 7834f, r0 + + bra 7835f + nop + .balign 4 +7834: .long trace_hardirqs_on +7835: + .endm + .macro TRACE_IRQS_OFF + + mov.l r0, @-r15 + mov.l r1, @-r15 + mov.l r2, @-r15 + mov.l r3, @-r15 + mov.l r4, @-r15 + mov.l r5, @-r15 + mov.l r6, @-r15 + mov.l r7, @-r15 + + mov.l 7834f, r0 + jsr @r0 + nop + + 
mov.l @r15+, r7 + mov.l @r15+, r6 + mov.l @r15+, r5 + mov.l @r15+, r4 + mov.l @r15+, r3 + mov.l @r15+, r2 + mov.l @r15+, r1 + mov.l @r15+, r0 + mov.l 7834f, r0 + + bra 7835f + nop + .balign 4 +7834: .long trace_hardirqs_off +7835: + .endm + +#else + .macro TRACE_IRQS_ON + .endm + + .macro TRACE_IRQS_OFF + .endm +#endif + #if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4) # define PREF(x) pref @x #else diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index d62175650c54..fc26ccd82789 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -45,7 +45,7 @@ */ #if defined(CONFIG_PREEMPT) -# define preempt_stop() cli +# define preempt_stop() cli ; TRACE_IRQS_OFF #else # define preempt_stop() # define resume_kernel __restore_all @@ -55,11 +55,7 @@ .align 2 ENTRY(exception_error) ! -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 2f, r0 - jsr @r0 - nop -#endif + TRACE_IRQS_ON sti mov.l 1f, r0 jmp @r0 @@ -67,22 +63,23 @@ ENTRY(exception_error) .align 2 1: .long do_exception_error -#ifdef CONFIG_TRACE_IRQFLAGS -2: .long trace_hardirqs_on -#endif .align 2 ret_from_exception: preempt_stop() -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 4f, r0 - jsr @r0 - nop -#endif ENTRY(ret_from_irq) ! mov #OFF_SR, r0 mov.l @(r0,r15), r0 ! get status register + + shlr2 r0 + and #0x3c, r0 + cmp/eq #0x3c, r0 + bt 9f + TRACE_IRQS_ON +9: + mov #OFF_SR, r0 + mov.l @(r0,r15), r0 ! get status register shll r0 shll r0 ! kernel space? get_current_thread_info r8, r0 @@ -125,11 +122,7 @@ noresched: ENTRY(resume_userspace) ! r8: current_thread_info cli -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r0 - jsr @r0 - nop -#endif + TRACE_IRQS_OFF + mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags tst #(_TIF_WORK_MASK & 0xff), r0 bt/s __restore_all @@ -156,11 +149,7 @@ work_resched: jsr @r1 ! schedule nop cli -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r0 - jsr @r0 - nop -#endif + TRACE_IRQS_OFF ! mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags tst #(_TIF_WORK_MASK & 0xff), r0 @@ -172,10 +161,6 @@ work_resched: 1: .long schedule 2: .long do_notify_resume 3: .long resume_userspace -#ifdef CONFIG_TRACE_IRQFLAGS -4: .long trace_hardirqs_on -5: .long trace_hardirqs_off -#endif .align 2 syscall_exit_work: @@ -184,11 +169,7 @@ syscall_exit_work: tst #(_TIF_WORK_SYSCALL_MASK & 0xff), r0 bt/s work_pending tst #_TIF_NEED_RESCHED, r0 -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r0 - jsr @r0 - nop -#endif + TRACE_IRQS_ON sti mov r15, r4 mov.l 8f, r0 ! do_syscall_trace_leave @@ -321,11 +302,7 @@ ENTRY(system_call) bt/s debug_trap ! it's a debug trap.. nop -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 5f, r10 - jsr @r10 - nop -#endif + TRACE_IRQS_ON sti ! @@ -355,11 +332,7 @@ syscall_call: ! syscall_exit: cli -#ifdef CONFIG_TRACE_IRQFLAGS - mov.l 6f, r0 - jsr @r0 - nop -#endif + TRACE_IRQS_OFF ! get_current_thread_info r8, r0 mov.l @(TI_FLAGS,r8), r0 ! 
current_thread_info->flags @@ -377,9 +350,5 @@ syscall_exit: #endif 2: .long NR_syscalls 3: .long sys_call_table -#ifdef CONFIG_TRACE_IRQFLAGS -5: .long trace_hardirqs_on -6: .long trace_hardirqs_off -#endif 7: .long do_syscall_trace_enter 8: .long do_syscall_trace_leave diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c index 77dfecb64373..e27a19e1f46e 100644 --- a/arch/sh/kernel/io_trapped.c +++ b/arch/sh/kernel/io_trapped.c @@ -112,14 +112,15 @@ void __iomem *match_trapped_io_handler(struct list_head *list, struct trapped_io *tiop; struct resource *res; int k, len; + unsigned long flags; - spin_lock_irq(&trapped_lock); + spin_lock_irqsave(&trapped_lock, flags); list_for_each_entry(tiop, list, list) { voffs = 0; for (k = 0; k < tiop->num_resources; k++) { res = tiop->resource + k; if (res->start == offset) { - spin_unlock_irq(&trapped_lock); + spin_unlock_irqrestore(&trapped_lock, flags); return tiop->virt_base + voffs; } @@ -127,7 +128,7 @@ void __iomem *match_trapped_io_handler(struct list_head *list, voffs += roundup(len, PAGE_SIZE); } } - spin_unlock_irq(&trapped_lock); + spin_unlock_irqrestore(&trapped_lock, flags); return NULL; } EXPORT_SYMBOL_GPL(match_trapped_io_handler); diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 8e2feb563347..4cbb87ad070a 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -662,10 +662,11 @@ static irqreturn_t sci_rx_interrupt(int irq, void *port) static irqreturn_t sci_tx_interrupt(int irq, void *ptr) { struct uart_port *port = ptr; + unsigned long flags; - spin_lock_irq(&port->lock); + spin_lock_irqsave(&port->lock, flags); sci_transmit_chars(port); - spin_unlock_irq(&port->lock); + spin_unlock_irqrestore(&port->lock, flags); return IRQ_HANDLED; } -- cgit v1.2.3-59-g8ed1b From 05aa7882757f68af799140142ec44f83b2df4298 Mon Sep 17 00:00:00 2001 From: Rafael Ignacio Zurita Date: Tue, 4 Aug 2009 14:38:08 +0900 Subject: sh: Add early printk support for SH770x CPUs. This adds early printk support for SH770x (tested on SH7709 based hp6xx). 
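For completeness, a rough usage sketch for an hp6xx style board follows. CONFIG_EARLY_SCIF_CONSOLE and the earlyprintk= syntax are recalled from the surrounding arch/sh code rather than taken from this patch, so treat those names as assumptions:

# Hedged .config sketch; only EARLY_SCIF_CONSOLE_PORT's default comes from this patch.
CONFIG_EARLY_PRINTK=y
CONFIG_EARLY_SCIF_CONSOLE=y
CONFIG_EARLY_SCIF_CONSOLE_PORT=0xa4000150
# ...and on the kernel command line, something like:
#     earlyprintk=serial,115200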
Signed-off-by: Rafael Ignacio Zurita Signed-off-by: Paul Mundt --- arch/sh/Kconfig.debug | 1 + arch/sh/kernel/early_printk.c | 6 ++---- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index a6dce41296e7..763b792b1611 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -43,6 +43,7 @@ config EARLY_SCIF_CONSOLE_PORT default "0xfffe8000" if CPU_SUBTYPE_SH7203 default "0xfffe9800" if CPU_SUBTYPE_SH7206 || CPU_SUBTYPE_SH7263 default "0xffe80000" if CPU_SH4 + default "0xa4000150" if CPU_SH3 default "0x00000000" config EARLY_PRINTK diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c index a952dcf9999d..64f2746baf91 100644 --- a/arch/sh/kernel/early_printk.c +++ b/arch/sh/kernel/early_printk.c @@ -134,7 +134,7 @@ static void scif_sercon_init(char *s) sci_out(&scif_port, SCFCR, 0x0030); /* TTRG=b'11 */ sci_out(&scif_port, SCSCR, 0x0030); /* TE, RE */ } -#elif defined(CONFIG_CPU_SH4) +#elif defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3) #define DEFAULT_BAUD 115200 /* * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4 @@ -220,10 +220,8 @@ static int __init setup_early_printk(char *buf) early_console = &scif_console; #if !defined(CONFIG_SH_STANDARD_BIOS) -#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SUBTYPE_SH7720) || \ - defined(CONFIG_CPU_SUBTYPE_SH7721) +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3) scif_sercon_init(buf + 6); -#endif #endif } #endif -- cgit v1.2.3-59-g8ed1b From 6ba4a8f0f542e791e4158c91a844234b142578dc Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 06:57:36 +0000 Subject: sh: hwblk support for sh7724 This patch adds hwblk support for the sh7724 processor. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/cpu-sh4/cpu/sh7724.h | 17 +++++ arch/sh/kernel/cpu/sh4a/Makefile | 2 +- arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 119 ++++++++++++++++---------------- arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c | 121 +++++++++++++++++++++++++++++++++ 4 files changed, 201 insertions(+), 58 deletions(-) create mode 100644 arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/cpu-sh4/cpu/sh7724.h b/arch/sh/include/cpu-sh4/cpu/sh7724.h index 66fd1184359e..0cd1f71a1116 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7724.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7724.h @@ -266,4 +266,21 @@ enum { GPIO_FN_INTC_IRQ1, GPIO_FN_INTC_IRQ0, }; +enum { + HWBLK_UNKNOWN = 0, + HWBLK_TLB, HWBLK_IC, HWBLK_OC, HWBLK_RSMEM, HWBLK_ILMEM, HWBLK_L2C, + HWBLK_FPU, HWBLK_INTC, HWBLK_DMAC0, HWBLK_SHYWAY, + HWBLK_HUDI, HWBLK_DBG, HWBLK_UBC, + HWBLK_TMU0, HWBLK_CMT, HWBLK_RWDT, HWBLK_DMAC1, HWBLK_TMU1, + HWBLK_SCIF0, HWBLK_SCIF1, HWBLK_SCIF2, HWBLK_SCIF3, + HWBLK_SCIF4, HWBLK_SCIF5, HWBLK_MSIOF0, HWBLK_MSIOF1, + HWBLK_KEYSC, HWBLK_RTC, HWBLK_IIC0, HWBLK_IIC1, + HWBLK_MMC, HWBLK_ETHER, HWBLK_ATAPI, HWBLK_TPU, HWBLK_IRDA, + HWBLK_TSIF, HWBLK_USB1, HWBLK_USB0, HWBLK_2DG, + HWBLK_SDHI0, HWBLK_SDHI1, HWBLK_VEU1, HWBLK_CEU1, HWBLK_BEU1, + HWBLK_2DDMAC, HWBLK_SPU, HWBLK_JPU, HWBLK_VOU, + HWBLK_BEU0, HWBLK_CEU0, HWBLK_VEU0, HWBLK_VPU, HWBLK_LCDC, + HWBLK_NR, +}; + #endif /* __ASM_SH7724_H__ */ diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index 1d7ae38bc611..12cddf4c721d 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile @@ -27,7 +27,7 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7786) := clock-sh7786.o clock-$(CONFIG_CPU_SUBTYPE_SH7343) := 
clock-sh7343.o clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o hwblk-sh7722.o clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o hwblk-sh7723.o -clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o +clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o hwblk-sh7724.o clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7366.o clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index 5d5c9b952883..ba24e38c9fc7 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c @@ -22,6 +22,8 @@ #include #include #include +#include +#include /* SH7724 registers */ #define FRQCRA 0xa4150000 @@ -156,64 +158,67 @@ struct clk div6_clks[] = { SH_CLK_DIV6("spu_clk", &div3_clk, SPUCLKCR, 0), }; -#define MSTP(_str, _parent, _reg, _bit, _force_on, _need_cpg, _need_ram) \ - SH_CLK_MSTP32(_str, -1, _parent, _reg, _bit, _force_on * CLK_ENABLE_ON_INIT) +#define R_CLK (&r_clk) +#define P_CLK (&div4_clks[DIV4_P]) +#define B_CLK (&div4_clks[DIV4_B]) +#define I_CLK (&div4_clks[DIV4_I]) +#define SH_CLK (&div4_clks[DIV4_SH]) static struct clk mstp_clks[] = { - MSTP("tlb0", &div4_clks[DIV4_I], MSTPCR0, 31, 1, 1, 0), - MSTP("ic0", &div4_clks[DIV4_I], MSTPCR0, 30, 1, 1, 0), - MSTP("oc0", &div4_clks[DIV4_I], MSTPCR0, 29, 1, 1, 0), - MSTP("rs0", &div4_clks[DIV4_B], MSTPCR0, 28, 1, 1, 0), - MSTP("ilmem0", &div4_clks[DIV4_I], MSTPCR0, 27, 1, 1, 0), - MSTP("l2c0", &div4_clks[DIV4_SH], MSTPCR0, 26, 1, 1, 0), - MSTP("fpu0", &div4_clks[DIV4_I], MSTPCR0, 24, 1, 1, 0), - MSTP("intc0", &div4_clks[DIV4_P], MSTPCR0, 22, 1, 1, 0), - MSTP("dmac0", &div4_clks[DIV4_B], MSTPCR0, 21, 0, 1, 1), - MSTP("sh0", &div4_clks[DIV4_SH], MSTPCR0, 20, 0, 1, 0), - MSTP("hudi0", &div4_clks[DIV4_P], MSTPCR0, 19, 0, 1, 0), - MSTP("ubc0", &div4_clks[DIV4_I], MSTPCR0, 17, 0, 1, 0), - MSTP("tmu0", &div4_clks[DIV4_P], MSTPCR0, 15, 0, 1, 0), - MSTP("cmt0", &r_clk, MSTPCR0, 14, 0, 0, 0), - MSTP("rwdt0", &r_clk, MSTPCR0, 13, 0, 0, 0), - MSTP("dmac1", &div4_clks[DIV4_B], MSTPCR0, 12, 0, 1, 1), - MSTP("tmu1", &div4_clks[DIV4_P], MSTPCR0, 10, 0, 1, 0), - MSTP("scif0", &div4_clks[DIV4_P], MSTPCR0, 9, 0, 1, 0), - MSTP("scif1", &div4_clks[DIV4_P], MSTPCR0, 8, 0, 1, 0), - MSTP("scif2", &div4_clks[DIV4_P], MSTPCR0, 7, 0, 1, 0), - MSTP("scif3", &div4_clks[DIV4_B], MSTPCR0, 6, 0, 1, 0), - MSTP("scif4", &div4_clks[DIV4_B], MSTPCR0, 5, 0, 1, 0), - MSTP("scif5", &div4_clks[DIV4_B], MSTPCR0, 4, 0, 1, 0), - MSTP("msiof0", &div4_clks[DIV4_B], MSTPCR0, 2, 0, 1, 0), - MSTP("msiof1", &div4_clks[DIV4_B], MSTPCR0, 1, 0, 1, 0), - - MSTP("keysc0", &r_clk, MSTPCR1, 12, 0, 0, 0), - MSTP("rtc0", &r_clk, MSTPCR1, 11, 0, 0, 0), - MSTP("i2c0", &div4_clks[DIV4_P], MSTPCR1, 9, 0, 1, 0), - MSTP("i2c1", &div4_clks[DIV4_P], MSTPCR1, 8, 0, 1, 0), - - MSTP("mmc0", &div4_clks[DIV4_B], MSTPCR2, 29, 0, 1, 0), - MSTP("eth0", &div4_clks[DIV4_B], MSTPCR2, 28, 0, 1, 0), - MSTP("atapi0", &div4_clks[DIV4_B], MSTPCR2, 26, 0, 1, 0), - MSTP("tpu0", &div4_clks[DIV4_B], MSTPCR2, 25, 0, 1, 0), - MSTP("irda0", &div4_clks[DIV4_P], MSTPCR2, 24, 0, 1, 0), - MSTP("tsif0", &div4_clks[DIV4_B], MSTPCR2, 22, 0, 1, 0), - MSTP("usb1", &div4_clks[DIV4_B], MSTPCR2, 21, 0, 1, 1), - MSTP("usb0", &div4_clks[DIV4_B], MSTPCR2, 20, 0, 1, 1), - MSTP("2dg0", &div4_clks[DIV4_B], MSTPCR2, 19, 0, 1, 1), - MSTP("sdhi0", &div4_clks[DIV4_B], MSTPCR2, 18, 0, 1, 0), - MSTP("sdhi1", &div4_clks[DIV4_B], MSTPCR2, 17, 0, 1, 0), - MSTP("veu1", &div4_clks[DIV4_B], MSTPCR2, 15, 1, 1, 1), - MSTP("ceu1", &div4_clks[DIV4_B], 
MSTPCR2, 13, 0, 1, 1), - MSTP("beu1", &div4_clks[DIV4_B], MSTPCR2, 12, 0, 1, 1), - MSTP("2ddmac0", &div4_clks[DIV4_SH], MSTPCR2, 10, 0, 1, 1), - MSTP("spu0", &div4_clks[DIV4_B], MSTPCR2, 9, 0, 1, 0), - MSTP("jpu0", &div4_clks[DIV4_B], MSTPCR2, 6, 1, 1, 1), - MSTP("vou0", &div4_clks[DIV4_B], MSTPCR2, 5, 0, 1, 1), - MSTP("beu0", &div4_clks[DIV4_B], MSTPCR2, 4, 0, 1, 1), - MSTP("ceu0", &div4_clks[DIV4_B], MSTPCR2, 3, 0, 1, 1), - MSTP("veu0", &div4_clks[DIV4_B], MSTPCR2, 2, 1, 1, 1), - MSTP("vpu0", &div4_clks[DIV4_B], MSTPCR2, 1, 1, 1, 1), - MSTP("lcdc0", &div4_clks[DIV4_B], MSTPCR2, 0, 0, 1, 1), + SH_HWBLK_CLK("tlb0", -1, I_CLK, HWBLK_TLB, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("ic0", -1, I_CLK, HWBLK_IC, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("oc0", -1, I_CLK, HWBLK_OC, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("rs0", -1, B_CLK, HWBLK_RSMEM, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("ilmem0", -1, I_CLK, HWBLK_ILMEM, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("l2c0", -1, SH_CLK, HWBLK_L2C, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("fpu0", -1, I_CLK, HWBLK_FPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("intc0", -1, P_CLK, HWBLK_INTC, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("dmac0", -1, B_CLK, HWBLK_DMAC0, 0), + SH_HWBLK_CLK("sh0", -1, SH_CLK, HWBLK_SHYWAY, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("hudi0", -1, P_CLK, HWBLK_HUDI, 0), + SH_HWBLK_CLK("ubc0", -1, I_CLK, HWBLK_UBC, 0), + SH_HWBLK_CLK("tmu0", -1, P_CLK, HWBLK_TMU0, 0), + SH_HWBLK_CLK("cmt0", -1, R_CLK, HWBLK_CMT, 0), + SH_HWBLK_CLK("rwdt0", -1, R_CLK, HWBLK_RWDT, 0), + SH_HWBLK_CLK("dmac1", -1, B_CLK, HWBLK_DMAC1, 0), + SH_HWBLK_CLK("tmu1", -1, P_CLK, HWBLK_TMU1, 0), + SH_HWBLK_CLK("scif0", -1, P_CLK, HWBLK_SCIF0, 0), + SH_HWBLK_CLK("scif1", -1, P_CLK, HWBLK_SCIF1, 0), + SH_HWBLK_CLK("scif2", -1, P_CLK, HWBLK_SCIF2, 0), + SH_HWBLK_CLK("scif3", -1, B_CLK, HWBLK_SCIF3, 0), + SH_HWBLK_CLK("scif4", -1, B_CLK, HWBLK_SCIF4, 0), + SH_HWBLK_CLK("scif5", -1, B_CLK, HWBLK_SCIF5, 0), + SH_HWBLK_CLK("msiof0", -1, B_CLK, HWBLK_MSIOF0, 0), + SH_HWBLK_CLK("msiof1", -1, B_CLK, HWBLK_MSIOF1, 0), + + SH_HWBLK_CLK("keysc0", -1, R_CLK, HWBLK_KEYSC, 0), + SH_HWBLK_CLK("rtc0", -1, R_CLK, HWBLK_RTC, 0), + SH_HWBLK_CLK("i2c0", -1, P_CLK, HWBLK_IIC0, 0), + SH_HWBLK_CLK("i2c1", -1, P_CLK, HWBLK_IIC1, 0), + + SH_HWBLK_CLK("mmc0", -1, B_CLK, HWBLK_MMC, 0), + SH_HWBLK_CLK("eth0", -1, B_CLK, HWBLK_ETHER, 0), + SH_HWBLK_CLK("atapi0", -1, B_CLK, HWBLK_ATAPI, 0), + SH_HWBLK_CLK("tpu0", -1, B_CLK, HWBLK_TPU, 0), + SH_HWBLK_CLK("irda0", -1, P_CLK, HWBLK_IRDA, 0), + SH_HWBLK_CLK("tsif0", -1, B_CLK, HWBLK_TSIF, 0), + SH_HWBLK_CLK("usb1", -1, B_CLK, HWBLK_USB1, 0), + SH_HWBLK_CLK("usb0", -1, B_CLK, HWBLK_USB0, 0), + SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0), + SH_HWBLK_CLK("sdhi0", -1, B_CLK, HWBLK_SDHI0, 0), + SH_HWBLK_CLK("sdhi1", -1, B_CLK, HWBLK_SDHI1, 0), + SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU1, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("ceu1", -1, B_CLK, HWBLK_CEU1, 0), + SH_HWBLK_CLK("beu1", -1, B_CLK, HWBLK_BEU1, 0), + SH_HWBLK_CLK("2ddmac0", -1, SH_CLK, HWBLK_2DDMAC, 0), + SH_HWBLK_CLK("spu0", -1, B_CLK, HWBLK_SPU, 0), + SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0), + SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU0, 0), + SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU0, 0), + SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU0, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("lcdc0", -1, B_CLK, HWBLK_LCDC, 0), }; int __init arch_clk_init(void) @@ -236,7 +241,7 @@ int __init arch_clk_init(void) ret = 
sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks)); if (!ret) - ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks)); + ret = sh_hwblk_clk_register(mstp_clks, ARRAY_SIZE(mstp_clks)); return ret; } diff --git a/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c b/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c new file mode 100644 index 000000000000..1613ad6013c3 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c @@ -0,0 +1,121 @@ +/* + * arch/sh/kernel/cpu/sh4a/hwblk-sh7724.c + * + * SH7724 hardware block support + * + * Copyright (C) 2009 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include +#include +#include +#include +#include + +/* SH7724 registers */ +#define MSTPCR0 0xa4150030 +#define MSTPCR1 0xa4150034 +#define MSTPCR2 0xa4150038 + +/* SH7724 Power Domains */ +enum { CORE_AREA, SUB_AREA, CORE_AREA_BM }; +static struct hwblk_area sh7724_hwblk_area[] = { + [CORE_AREA] = HWBLK_AREA(0, 0), + [CORE_AREA_BM] = HWBLK_AREA(HWBLK_AREA_FLAG_PARENT, CORE_AREA), + [SUB_AREA] = HWBLK_AREA(0, 0), +}; + +/* Table mapping HWBLK to Module Stop Bit and Power Domain */ +static struct hwblk sh7724_hwblk[HWBLK_NR] = { + [HWBLK_TLB] = HWBLK(MSTPCR0, 31, CORE_AREA), + [HWBLK_IC] = HWBLK(MSTPCR0, 30, CORE_AREA), + [HWBLK_OC] = HWBLK(MSTPCR0, 29, CORE_AREA), + [HWBLK_RSMEM] = HWBLK(MSTPCR0, 28, CORE_AREA), + [HWBLK_ILMEM] = HWBLK(MSTPCR0, 27, CORE_AREA), + [HWBLK_L2C] = HWBLK(MSTPCR0, 26, CORE_AREA), + [HWBLK_FPU] = HWBLK(MSTPCR0, 24, CORE_AREA), + [HWBLK_INTC] = HWBLK(MSTPCR0, 22, CORE_AREA), + [HWBLK_DMAC0] = HWBLK(MSTPCR0, 21, CORE_AREA_BM), + [HWBLK_SHYWAY] = HWBLK(MSTPCR0, 20, CORE_AREA), + [HWBLK_HUDI] = HWBLK(MSTPCR0, 19, CORE_AREA), + [HWBLK_DBG] = HWBLK(MSTPCR0, 18, CORE_AREA), + [HWBLK_UBC] = HWBLK(MSTPCR0, 17, CORE_AREA), + [HWBLK_TMU0] = HWBLK(MSTPCR0, 15, CORE_AREA), + [HWBLK_CMT] = HWBLK(MSTPCR0, 14, SUB_AREA), + [HWBLK_RWDT] = HWBLK(MSTPCR0, 13, SUB_AREA), + [HWBLK_DMAC1] = HWBLK(MSTPCR0, 12, CORE_AREA_BM), + [HWBLK_TMU1] = HWBLK(MSTPCR0, 10, CORE_AREA), + [HWBLK_SCIF0] = HWBLK(MSTPCR0, 9, CORE_AREA), + [HWBLK_SCIF1] = HWBLK(MSTPCR0, 8, CORE_AREA), + [HWBLK_SCIF2] = HWBLK(MSTPCR0, 7, CORE_AREA), + [HWBLK_SCIF3] = HWBLK(MSTPCR0, 6, CORE_AREA), + [HWBLK_SCIF4] = HWBLK(MSTPCR0, 5, CORE_AREA), + [HWBLK_SCIF5] = HWBLK(MSTPCR0, 4, CORE_AREA), + [HWBLK_MSIOF0] = HWBLK(MSTPCR0, 2, CORE_AREA), + [HWBLK_MSIOF1] = HWBLK(MSTPCR0, 1, CORE_AREA), + + [HWBLK_KEYSC] = HWBLK(MSTPCR1, 12, SUB_AREA), + [HWBLK_RTC] = HWBLK(MSTPCR1, 11, SUB_AREA), + [HWBLK_IIC0] = HWBLK(MSTPCR1, 9, CORE_AREA), + [HWBLK_IIC1] = HWBLK(MSTPCR1, 8, CORE_AREA), + + [HWBLK_MMC] = HWBLK(MSTPCR2, 29, CORE_AREA), + [HWBLK_ETHER] = HWBLK(MSTPCR2, 28, CORE_AREA_BM), + [HWBLK_ATAPI] = HWBLK(MSTPCR2, 26, CORE_AREA_BM), + [HWBLK_TPU] = HWBLK(MSTPCR2, 25, CORE_AREA), + [HWBLK_IRDA] = HWBLK(MSTPCR2, 24, CORE_AREA), + [HWBLK_TSIF] = HWBLK(MSTPCR2, 22, CORE_AREA), + [HWBLK_USB1] = 
HWBLK(MSTPCR2, 21, CORE_AREA), + [HWBLK_USB0] = HWBLK(MSTPCR2, 20, CORE_AREA), + [HWBLK_2DG] = HWBLK(MSTPCR2, 19, CORE_AREA_BM), + [HWBLK_SDHI0] = HWBLK(MSTPCR2, 18, CORE_AREA), + [HWBLK_SDHI1] = HWBLK(MSTPCR2, 17, CORE_AREA), + [HWBLK_VEU1] = HWBLK(MSTPCR2, 15, CORE_AREA_BM), + [HWBLK_CEU1] = HWBLK(MSTPCR2, 13, CORE_AREA_BM), + [HWBLK_BEU1] = HWBLK(MSTPCR2, 12, CORE_AREA_BM), + [HWBLK_2DDMAC] = HWBLK(MSTPCR2, 10, CORE_AREA_BM), + [HWBLK_SPU] = HWBLK(MSTPCR2, 9, CORE_AREA_BM), + [HWBLK_JPU] = HWBLK(MSTPCR2, 6, CORE_AREA_BM), + [HWBLK_VOU] = HWBLK(MSTPCR2, 5, CORE_AREA_BM), + [HWBLK_BEU0] = HWBLK(MSTPCR2, 4, CORE_AREA_BM), + [HWBLK_CEU0] = HWBLK(MSTPCR2, 3, CORE_AREA_BM), + [HWBLK_VEU0] = HWBLK(MSTPCR2, 2, CORE_AREA_BM), + [HWBLK_VPU] = HWBLK(MSTPCR2, 1, CORE_AREA_BM), + [HWBLK_LCDC] = HWBLK(MSTPCR2, 0, CORE_AREA_BM), +}; + +static struct hwblk_info sh7724_hwblk_info = { + .areas = sh7724_hwblk_area, + .nr_areas = ARRAY_SIZE(sh7724_hwblk_area), + .hwblks = sh7724_hwblk, + .nr_hwblks = ARRAY_SIZE(sh7724_hwblk), +}; + +int arch_hwblk_sleep_mode(void) +{ + if (!sh7724_hwblk_area[CORE_AREA].cnt[HWBLK_CNT_USAGE]) + return SUSP_SH_STANDBY | SUSP_SH_SF; + + if (!sh7724_hwblk_area[CORE_AREA_BM].cnt[HWBLK_CNT_USAGE]) + return SUSP_SH_SLEEP | SUSP_SH_SF; + + return SUSP_SH_SLEEP; +} + +int __init arch_hwblk_init(void) +{ + return hwblk_register(&sh7724_hwblk_info); +} -- cgit v1.2.3-59-g8ed1b From 133b170f08d6c20578f25b1ae71f80a5e638ccb6 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 07:01:36 +0000 Subject: sh: clean up MSTPCRn register definitions This patch removes the unused MSTPCRn register definitions from the SuperH Mobile code for sh7722, sh7723 and sh7724. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/clock-sh7722.c | 3 --- arch/sh/kernel/cpu/sh4a/clock-sh7723.c | 3 --- arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 3 --- 3 files changed, 9 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c index 1fa9e1dd1cc8..5b1bbbe63b1b 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c @@ -32,9 +32,6 @@ #define SCLKBCR 0xa415000c #define IRDACLKCR 0xa4150018 #define PLLCR 0xa4150024 -#define MSTPCR0 0xa4150030 -#define MSTPCR1 0xa4150034 -#define MSTPCR2 0xa4150038 #define DLLFRQ 0xa4150050 /* Fixed 32 KHz root clock for RTC and Power Management purposes */ diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c index bf64c78eee34..e5c63911403c 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c @@ -32,9 +32,6 @@ #define SCLKBCR 0xa415000c #define IRDACLKCR 0xa4150018 #define PLLCR 0xa4150024 -#define MSTPCR0 0xa4150030 -#define MSTPCR1 0xa4150034 -#define MSTPCR2 0xa4150038 #define DLLFRQ 0xa4150050 /* Fixed 32 KHz root clock for RTC and Power Management purposes */ diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index ba24e38c9fc7..34611d97378e 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c @@ -33,9 +33,6 @@ #define FCLKBCR 0xa415000c #define IRDACLKCR 0xa4150018 #define PLLCR 0xa4150024 -#define MSTPCR0 0xa4150030 -#define MSTPCR1 0xa4150034 -#define MSTPCR2 0xa4150038 #define SPUCLKCR 0xa415003c #define FLLFRQ 0xa4150050 #define LSTATS 0xa4150060 -- cgit v1.2.3-59-g8ed1b From f69d5782742e57d5f047ed13440bcf1b320cb074 Mon Sep 17 00:00:00 2001 From: 
Magnus Damm Date: Fri, 31 Jul 2009 07:47:27 +0000 Subject: sh: Runtime PM pdev hwblk - sh7722 Add hwblk_id to on-chip sh7722 platform devices. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/setup-sh7722.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c index 67b0d87fcb27..35097753456c 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -17,6 +17,7 @@ #include #include #include +#include static struct resource rtc_resources[] = { [0] = { @@ -46,6 +47,9 @@ static struct platform_device rtc_device = { .id = -1, .num_resources = ARRAY_SIZE(rtc_resources), .resource = rtc_resources, + .archdata = { + .hwblk_id = HWBLK_RTC, + }, }; static struct m66592_platdata usbf_platdata = { @@ -76,6 +80,9 @@ static struct platform_device usbf_device = { }, .num_resources = ARRAY_SIZE(usbf_resources), .resource = usbf_resources, + .archdata = { + .hwblk_id = HWBLK_USBF, + }, }; static struct resource iic_resources[] = { @@ -97,6 +104,9 @@ static struct platform_device iic_device = { .id = 0, /* "i2c0" clock */ .num_resources = ARRAY_SIZE(iic_resources), .resource = iic_resources, + .archdata = { + .hwblk_id = HWBLK_IIC, + }, }; static struct uio_info vpu_platform_data = { @@ -125,6 +135,9 @@ static struct platform_device vpu_device = { }, .resource = vpu_resources, .num_resources = ARRAY_SIZE(vpu_resources), + .archdata = { + .hwblk_id = HWBLK_VPU, + }, }; static struct uio_info veu_platform_data = { @@ -153,6 +166,9 @@ static struct platform_device veu_device = { }, .resource = veu_resources, .num_resources = ARRAY_SIZE(veu_resources), + .archdata = { + .hwblk_id = HWBLK_VEU, + }, }; static struct uio_info jpu_platform_data = { @@ -181,6 +197,9 @@ static struct platform_device jpu_device = { }, .resource = jpu_resources, .num_resources = ARRAY_SIZE(jpu_resources), + .archdata = { + .hwblk_id = HWBLK_JPU, + }, }; static struct sh_timer_config cmt_platform_data = { @@ -213,6 +232,9 @@ static struct platform_device cmt_device = { }, .resource = cmt_resources, .num_resources = ARRAY_SIZE(cmt_resources), + .archdata = { + .hwblk_id = HWBLK_CMT, + }, }; static struct sh_timer_config tmu0_platform_data = { @@ -244,6 +266,9 @@ static struct platform_device tmu0_device = { }, .resource = tmu0_resources, .num_resources = ARRAY_SIZE(tmu0_resources), + .archdata = { + .hwblk_id = HWBLK_TMU, + }, }; static struct sh_timer_config tmu1_platform_data = { @@ -275,6 +300,9 @@ static struct platform_device tmu1_device = { }, .resource = tmu1_resources, .num_resources = ARRAY_SIZE(tmu1_resources), + .archdata = { + .hwblk_id = HWBLK_TMU, + }, }; static struct sh_timer_config tmu2_platform_data = { @@ -305,6 +333,9 @@ static struct platform_device tmu2_device = { }, .resource = tmu2_resources, .num_resources = ARRAY_SIZE(tmu2_resources), + .archdata = { + .hwblk_id = HWBLK_TMU, + }, }; static struct plat_sci_port sci_platform_data[] = { -- cgit v1.2.3-59-g8ed1b From 09d21f9c83c49afd04850b8701772c09954054e0 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 07:47:53 +0000 Subject: sh: Runtime PM pdev hwblk - sh7723 Add hwblk_id to on-chip sh7723 platform devices. 
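The pattern is the same for every device touched here; as a sketch (only the new archdata entry is the actual change, the surrounding fields are shown for illustration):

	static struct platform_device rtc_device = {
		.id		= -1,
		.num_resources	= ARRAY_SIZE(rtc_resources),
		.resource	= rtc_resources,
		.archdata = {
			/* lets the runtime PM code map this device to its hardware block */
			.hwblk_id = HWBLK_RTC,
		},
	};

This way the hwblk usage counters can be driven from the device model rather than per-driver knowledge of module stop bits.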
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/setup-sh7723.c | 40 ++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c index 26dc4d323252..4caa5a7ca86e 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c @@ -18,6 +18,7 @@ #include #include #include +#include static struct uio_info vpu_platform_data = { .name = "VPU5", @@ -45,6 +46,9 @@ static struct platform_device vpu_device = { }, .resource = vpu_resources, .num_resources = ARRAY_SIZE(vpu_resources), + .archdata = { + .hwblk_id = HWBLK_VPU, + }, }; static struct uio_info veu0_platform_data = { @@ -73,6 +77,9 @@ static struct platform_device veu0_device = { }, .resource = veu0_resources, .num_resources = ARRAY_SIZE(veu0_resources), + .archdata = { + .hwblk_id = HWBLK_VEU2H0, + }, }; static struct uio_info veu1_platform_data = { @@ -101,6 +108,9 @@ static struct platform_device veu1_device = { }, .resource = veu1_resources, .num_resources = ARRAY_SIZE(veu1_resources), + .archdata = { + .hwblk_id = HWBLK_VEU2H1, + }, }; static struct sh_timer_config cmt_platform_data = { @@ -133,6 +143,9 @@ static struct platform_device cmt_device = { }, .resource = cmt_resources, .num_resources = ARRAY_SIZE(cmt_resources), + .archdata = { + .hwblk_id = HWBLK_CMT, + }, }; static struct sh_timer_config tmu0_platform_data = { @@ -164,6 +177,9 @@ static struct platform_device tmu0_device = { }, .resource = tmu0_resources, .num_resources = ARRAY_SIZE(tmu0_resources), + .archdata = { + .hwblk_id = HWBLK_TMU0, + }, }; static struct sh_timer_config tmu1_platform_data = { @@ -195,6 +211,9 @@ static struct platform_device tmu1_device = { }, .resource = tmu1_resources, .num_resources = ARRAY_SIZE(tmu1_resources), + .archdata = { + .hwblk_id = HWBLK_TMU0, + }, }; static struct sh_timer_config tmu2_platform_data = { @@ -225,6 +244,9 @@ static struct platform_device tmu2_device = { }, .resource = tmu2_resources, .num_resources = ARRAY_SIZE(tmu2_resources), + .archdata = { + .hwblk_id = HWBLK_TMU0, + }, }; static struct sh_timer_config tmu3_platform_data = { @@ -255,6 +277,9 @@ static struct platform_device tmu3_device = { }, .resource = tmu3_resources, .num_resources = ARRAY_SIZE(tmu3_resources), + .archdata = { + .hwblk_id = HWBLK_TMU1, + }, }; static struct sh_timer_config tmu4_platform_data = { @@ -285,6 +310,9 @@ static struct platform_device tmu4_device = { }, .resource = tmu4_resources, .num_resources = ARRAY_SIZE(tmu4_resources), + .archdata = { + .hwblk_id = HWBLK_TMU1, + }, }; static struct sh_timer_config tmu5_platform_data = { @@ -315,6 +343,9 @@ static struct platform_device tmu5_device = { }, .resource = tmu5_resources, .num_resources = ARRAY_SIZE(tmu5_resources), + .archdata = { + .hwblk_id = HWBLK_TMU1, + }, }; static struct plat_sci_port sci_platform_data[] = { @@ -395,6 +426,9 @@ static struct platform_device rtc_device = { .id = -1, .num_resources = ARRAY_SIZE(rtc_resources), .resource = rtc_resources, + .archdata = { + .hwblk_id = HWBLK_RTC, + }, }; static struct r8a66597_platdata r8a66597_data = { @@ -424,6 +458,9 @@ static struct platform_device sh7723_usb_host_device = { }, .num_resources = ARRAY_SIZE(sh7723_usb_host_resources), .resource = sh7723_usb_host_resources, + .archdata = { + .hwblk_id = HWBLK_USB, + }, }; static struct resource iic_resources[] = { @@ -445,6 +482,9 @@ static struct platform_device iic_device = { .id = 0, /* 
"i2c0" clock */ .num_resources = ARRAY_SIZE(iic_resources), .resource = iic_resources, + .archdata = { + .hwblk_id = HWBLK_IIC, + }, }; static struct platform_device *sh7723_devices[] __initdata = { -- cgit v1.2.3-59-g8ed1b From 593a0c898ac2f09f001d536f699966ec4bc1d25f Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 31 Jul 2009 07:48:11 +0000 Subject: sh: Runtime PM pdev hwblk - sh7724 Add hwblk_id to on-chip sh7724 platform devices. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/setup-sh7724.c | 43 ++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c index a04edaab9a29..f3851fd757ec 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c @@ -22,6 +22,7 @@ #include #include #include +#include /* Serial */ static struct plat_sci_port sci_platform_data[] = { @@ -103,6 +104,9 @@ static struct platform_device rtc_device = { .id = -1, .num_resources = ARRAY_SIZE(rtc_resources), .resource = rtc_resources, + .archdata = { + .hwblk_id = HWBLK_RTC, + }, }; /* I2C0 */ @@ -125,6 +129,9 @@ static struct platform_device iic0_device = { .id = 0, /* "i2c0" clock */ .num_resources = ARRAY_SIZE(iic0_resources), .resource = iic0_resources, + .archdata = { + .hwblk_id = HWBLK_IIC0, + }, }; /* I2C1 */ @@ -147,6 +154,9 @@ static struct platform_device iic1_device = { .id = 1, /* "i2c1" clock */ .num_resources = ARRAY_SIZE(iic1_resources), .resource = iic1_resources, + .archdata = { + .hwblk_id = HWBLK_IIC1, + }, }; /* VPU */ @@ -176,6 +186,9 @@ static struct platform_device vpu_device = { }, .resource = vpu_resources, .num_resources = ARRAY_SIZE(vpu_resources), + .archdata = { + .hwblk_id = HWBLK_VPU, + }, }; /* VEU0 */ @@ -205,6 +218,9 @@ static struct platform_device veu0_device = { }, .resource = veu0_resources, .num_resources = ARRAY_SIZE(veu0_resources), + .archdata = { + .hwblk_id = HWBLK_VEU0, + }, }; /* VEU1 */ @@ -234,6 +250,9 @@ static struct platform_device veu1_device = { }, .resource = veu1_resources, .num_resources = ARRAY_SIZE(veu1_resources), + .archdata = { + .hwblk_id = HWBLK_VEU1, + }, }; static struct sh_timer_config cmt_platform_data = { @@ -266,6 +285,9 @@ static struct platform_device cmt_device = { }, .resource = cmt_resources, .num_resources = ARRAY_SIZE(cmt_resources), + .archdata = { + .hwblk_id = HWBLK_CMT, + }, }; static struct sh_timer_config tmu0_platform_data = { @@ -297,6 +319,9 @@ static struct platform_device tmu0_device = { }, .resource = tmu0_resources, .num_resources = ARRAY_SIZE(tmu0_resources), + .archdata = { + .hwblk_id = HWBLK_TMU0, + }, }; static struct sh_timer_config tmu1_platform_data = { @@ -328,6 +353,9 @@ static struct platform_device tmu1_device = { }, .resource = tmu1_resources, .num_resources = ARRAY_SIZE(tmu1_resources), + .archdata = { + .hwblk_id = HWBLK_TMU0, + }, }; static struct sh_timer_config tmu2_platform_data = { @@ -358,6 +386,9 @@ static struct platform_device tmu2_device = { }, .resource = tmu2_resources, .num_resources = ARRAY_SIZE(tmu2_resources), + .archdata = { + .hwblk_id = HWBLK_TMU0, + }, }; @@ -389,6 +420,9 @@ static struct platform_device tmu3_device = { }, .resource = tmu3_resources, .num_resources = ARRAY_SIZE(tmu3_resources), + .archdata = { + .hwblk_id = HWBLK_TMU1, + }, }; static struct sh_timer_config tmu4_platform_data = { @@ -419,6 +453,9 @@ static struct platform_device tmu4_device = { }, .resource = 
tmu4_resources, .num_resources = ARRAY_SIZE(tmu4_resources), + .archdata = { + .hwblk_id = HWBLK_TMU1, + }, }; static struct sh_timer_config tmu5_platform_data = { @@ -449,6 +486,9 @@ static struct platform_device tmu5_device = { }, .resource = tmu5_resources, .num_resources = ARRAY_SIZE(tmu5_resources), + .archdata = { + .hwblk_id = HWBLK_TMU1, + }, }; /* JPU */ @@ -478,6 +518,9 @@ static struct platform_device jpu_device = { }, .resource = jpu_resources, .num_resources = ARRAY_SIZE(jpu_resources), + .archdata = { + .hwblk_id = HWBLK_JPU, + }, }; static struct platform_device *sh7724_devices[] __initdata = { -- cgit v1.2.3-59-g8ed1b From 11d82905e0159c07fe2d1bfe5e7d80e4cea333ce Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 4 Aug 2009 15:54:33 +0900 Subject: sh: Fix up early printk build error. Missing endif in the early printk case, fix it up.. Signed-off-by: Paul Mundt --- arch/sh/kernel/early_printk.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c index 64f2746baf91..81a46145ffa5 100644 --- a/arch/sh/kernel/early_printk.c +++ b/arch/sh/kernel/early_printk.c @@ -222,6 +222,7 @@ static int __init setup_early_printk(char *buf) #if !defined(CONFIG_SH_STANDARD_BIOS) #if defined(CONFIG_CPU_SH4) || defined(CONFIG_CPU_SH3) scif_sercon_init(buf + 6); +#endif #endif } #endif -- cgit v1.2.3-59-g8ed1b From c7914834ef3b8a396b7e82ea34ac07cdcfe6f868 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 4 Aug 2009 17:14:39 +0900 Subject: sh: Tidy up NEFF-based sign extension for SH-5. This consolidates all of the NEFF-based sign extension for SH-5. In the future the other SH code will need to make use of this as well, so make it generic in preparation for more 32/64 consolidation. Signed-off-by: Paul Mundt --- arch/sh/include/asm/pgtable.h | 6 ++++++ arch/sh/kernel/process_64.c | 24 ++++++++++-------------- arch/sh/kernel/signal_64.c | 38 +++++++++++++------------------------- arch/sh/mm/fault_64.c | 11 +---------- arch/sh/mm/tlb-sh5.c | 21 +++++---------------- arch/sh/mm/tlbflush_64.c | 2 +- 6 files changed, 36 insertions(+), 66 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h index 43ef3e99fdd1..3cd7127af957 100644 --- a/arch/sh/include/asm/pgtable.h +++ b/arch/sh/include/asm/pgtable.h @@ -36,6 +36,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define NEFF_SIGN (1LL << (NEFF - 1)) #define NEFF_MASK (-1LL << NEFF) +static inline unsigned long long neff_sign_extend(unsigned long val) +{ + unsigned long long extended = val; + return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended; +} + #ifdef CONFIG_29BIT #define NPHYS 29 #else diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c index 24de74214940..1192398ef582 100644 --- a/arch/sh/kernel/process_64.c +++ b/arch/sh/kernel/process_64.c @@ -425,7 +425,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, struct task_struct *p, struct pt_regs *regs) { struct pt_regs *childregs; - unsigned long long se; /* Sign extension */ #ifdef CONFIG_SH_FPU if(last_task_used_math == current) { @@ -441,11 +440,19 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, *childregs = *regs; + /* + * Sign extend the edited stack. + * Note that thread.pc and thread.pc will stay + * 32-bit wide and context switch must take care + * of NEFF sign extension. 
+ */ if (user_mode(regs)) { - childregs->regs[15] = usp; + childregs->regs[15] = neff_sign_extend(usp); p->thread.uregs = childregs; } else { - childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE; + childregs->regs[15] = + neff_sign_extend((unsigned long)task_stack_page(p) + + THREAD_SIZE); } childregs->regs[9] = 0; /* Set return value for child */ @@ -454,17 +461,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, p->thread.sp = (unsigned long) childregs; p->thread.pc = (unsigned long) ret_from_fork; - /* - * Sign extend the edited stack. - * Note that thread.pc and thread.pc will stay - * 32-bit wide and context switch must take care - * of NEFF sign extension. - */ - - se = childregs->regs[15]; - se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se; - childregs->regs[15] = se; - return 0; } diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index 0663a0ee6021..026fd1cfe17d 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c @@ -561,13 +561,11 @@ static int setup_frame(int sig, struct k_sigaction *ka, /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa.sa_flags & SA_RESTORER) { - DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1; - /* * On SH5 all edited pointers are subject to NEFF */ - DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? - (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; + DEREF_REG_PR = neff_sign_extend((unsigned long) + ka->sa.sa_restorer | 0x1); } else { /* * Different approach on SH5. @@ -580,9 +578,8 @@ static int setup_frame(int sig, struct k_sigaction *ka, * . being code, linker turns ShMedia bit on, always * dereference index -1. */ - DEREF_REG_PR = (unsigned long) frame->retcode | 0x01; - DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? - (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; + DEREF_REG_PR = neff_sign_extend((unsigned long) + frame->retcode | 0x01); if (__copy_to_user(frame->retcode, (void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0) @@ -596,9 +593,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, * Set up registers for signal handler. * All edited pointers are subject to NEFF. */ - regs->regs[REG_SP] = (unsigned long) frame; - regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ? - (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP]; + regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame); regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ /* FIXME: @@ -613,8 +608,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc; regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc; - regs->pc = (unsigned long) ka->sa.sa_handler; - regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc; + regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler); set_fs(USER_DS); @@ -676,13 +670,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa.sa_flags & SA_RESTORER) { - DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1; - /* * On SH5 all edited pointers are subject to NEFF */ - DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? - (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; + DEREF_REG_PR = neff_sign_extend((unsigned long) + ka->sa.sa_restorer | 0x1); } else { /* * Different approach on SH5. 
@@ -695,15 +687,14 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, * . being code, linker turns ShMedia bit on, always * dereference index -1. */ - - DEREF_REG_PR = (unsigned long) frame->retcode | 0x01; - DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? - (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; + DEREF_REG_PR = neff_sign_extend((unsigned long) + frame->retcode | 0x01); if (__copy_to_user(frame->retcode, (void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0) goto give_sigsegv; + /* Cohere the trampoline with the I-cache. */ flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15); } @@ -711,14 +702,11 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, * Set up registers for signal handler. * All edited pointers are subject to NEFF. */ - regs->regs[REG_SP] = (unsigned long) frame; - regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ? - (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP]; + regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame); regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info; regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext; - regs->pc = (unsigned long) ka->sa.sa_handler; - regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc; + regs->pc = neff_sign_extend((unsigned long)ka->sa.sa_handler); set_fs(USER_DS); diff --git a/arch/sh/mm/fault_64.c b/arch/sh/mm/fault_64.c index bd63b961b2a9..2b356cec2489 100644 --- a/arch/sh/mm/fault_64.c +++ b/arch/sh/mm/fault_64.c @@ -56,16 +56,7 @@ inline void __do_tlb_refill(unsigned long address, /* * Set PTEH register */ - pteh = address & MMU_VPN_MASK; - - /* Sign extend based on neff. */ -#if (NEFF == 32) - /* Faster sign extension */ - pteh = (unsigned long long)(signed long long)(signed long)pteh; -#else - /* General case */ - pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh; -#endif + pteh = neff_sign_extend(address & MMU_VPN_MASK); /* Set the ASID. */ pteh |= get_asid() << PTEH_ASID_SHIFT; diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c index dae131243bcc..fdb64e41ec50 100644 --- a/arch/sh/mm/tlb-sh5.c +++ b/arch/sh/mm/tlb-sh5.c @@ -117,26 +117,15 @@ int sh64_put_wired_dtlb_entry(unsigned long long entry) * Load up a virtual<->physical translation for @eaddr<->@paddr in the * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry). */ -inline void sh64_setup_tlb_slot(unsigned long long config_addr, - unsigned long eaddr, - unsigned long asid, - unsigned long paddr) +void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, + unsigned long asid, unsigned long paddr) { unsigned long long pteh, ptel; - /* Sign extension */ -#if (NEFF == 32) - pteh = (unsigned long long)(signed long long)(signed long) eaddr; -#else -#error "Can't sign extend more than 32 bits yet" -#endif + pteh = neff_sign_extend(eaddr); pteh &= PAGE_MASK; pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID; -#if (NEFF == 32) - ptel = (unsigned long long)(signed long long)(signed long) paddr; -#else -#error "Can't sign extend more than 32 bits yet" -#endif + ptel = neff_sign_extend(paddr); ptel &= PAGE_MASK; ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE); @@ -152,5 +141,5 @@ inline void sh64_setup_tlb_slot(unsigned long long config_addr, * * Teardown any existing mapping in the TLB slot @config_addr. 
*/ -inline void sh64_teardown_tlb_slot(unsigned long long config_addr) +void sh64_teardown_tlb_slot(unsigned long long config_addr) __attribute__ ((alias("__flush_tlb_slot"))); diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c index f2e44e9ffb75..fa5a95a062d0 100644 --- a/arch/sh/mm/tlbflush_64.c +++ b/arch/sh/mm/tlbflush_64.c @@ -337,7 +337,7 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page) /* * Sign-extend based on neff. */ - lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page; + lpage = neff_sign_extend(page); match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID; match |= lpage; -- cgit v1.2.3-59-g8ed1b From d14d751ff9234595639a16e53b3cf0c575946bde Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 4 Aug 2009 17:17:00 +0900 Subject: sh64: Kill off special clear_page() implementation. This can use the now generic clear_page() implementation, which is backed by the sh64 optimized memset routine. This also fixes up the case where PAGE_SIZE != 4kB. Signed-off-by: Paul Mundt --- arch/sh/kernel/sh_ksyms_64.c | 1 - arch/sh/lib64/Makefile | 2 +- arch/sh/lib64/clear_page.S | 54 -------------------------------------------- 3 files changed, 1 insertion(+), 56 deletions(-) delete mode 100644 arch/sh/lib64/clear_page.S (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c index f5bd156ea504..f96c95c07c4b 100644 --- a/arch/sh/kernel/sh_ksyms_64.c +++ b/arch/sh/kernel/sh_ksyms_64.c @@ -52,7 +52,6 @@ EXPORT_SYMBOL(__get_user_asm_l); EXPORT_SYMBOL(__get_user_asm_q); EXPORT_SYMBOL(__strnlen_user); EXPORT_SYMBOL(__strncpy_from_user); -EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(__copy_user); diff --git a/arch/sh/lib64/Makefile b/arch/sh/lib64/Makefile index 334bb2da36ea..1fee75aa1f98 100644 --- a/arch/sh/lib64/Makefile +++ b/arch/sh/lib64/Makefile @@ -11,7 +11,7 @@ # Panic should really be compiled as PIC lib-y := udelay.o dbg.o panic.o memcpy.o memset.o \ - copy_user_memcpy.o copy_page.o clear_page.o strcpy.o strlen.o + copy_user_memcpy.o copy_page.o strcpy.o strlen.o # Extracted from libgcc lib-y += udivsi3.o udivdi3.o sdivsi3.o diff --git a/arch/sh/lib64/clear_page.S b/arch/sh/lib64/clear_page.S deleted file mode 100644 index 007ab48ecc1c..000000000000 --- a/arch/sh/lib64/clear_page.S +++ /dev/null @@ -1,54 +0,0 @@ -/* - Copyright 2003 Richard Curnow, SuperH (UK) Ltd. - - This file is subject to the terms and conditions of the GNU General Public - License. See the file "COPYING" in the main directory of this archive - for more details. - - Tight version of memset for the case of just clearing a page. It turns out - that having the alloco's spaced out slightly due to the increment/branch - pair causes them to contend less for access to the cache. Similarly, - keeping the stores apart from the allocos causes less contention. => Do two - separate loops. Do multiple stores per loop to amortise the - increment/branch cost a little. - - Parameters: - r2 : source effective address (start of page) - - Always clears 4096 bytes. - - Note : alloco guarded by synco to avoid TAKum03020 erratum - -*/ - - .section .text..SHmedia32,"ax" - .little - - .balign 8 - .global clear_page -clear_page: - pta/l 1f, tr1 - pta/l 2f, tr2 - ptabs/l r18, tr0 - - movi 4096, r7 - add r2, r7, r7 - add r2, r63, r6 -1: - alloco r6, 0 - synco ! 
TAKum03020 - addi r6, 32, r6 - bgt/l r7, r6, tr1 - - add r2, r63, r6 -2: - st.q r6, 0, r63 - st.q r6, 8, r63 - st.q r6, 16, r63 - st.q r6, 24, r63 - addi r6, 32, r6 - bgt/l r7, r6, tr2 - - blink tr0, r63 - - -- cgit v1.2.3-59-g8ed1b From 4e14dfc722b8e9e07a355f97aa60a3d9f0739071 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Fri, 7 Aug 2009 16:11:19 +0100 Subject: sh: Use the generalized stacktrace ops Copy the stacktrace ops code from x86 and provide a central function for use by functions that need to dump a callstack. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/stacktrace.h | 25 ++++++++ arch/sh/kernel/Makefile_32 | 2 +- arch/sh/kernel/dumpstack.c | 128 +++++++++++++++++++++++++++++++++++++++ arch/sh/kernel/stacktrace.c | 87 ++++++++++++++++++-------- arch/sh/kernel/traps_32.c | 24 -------- arch/sh/oprofile/backtrace.c | 56 +++++++++++------ 6 files changed, 251 insertions(+), 71 deletions(-) create mode 100644 arch/sh/include/asm/stacktrace.h create mode 100644 arch/sh/kernel/dumpstack.c (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/stacktrace.h b/arch/sh/include/asm/stacktrace.h new file mode 100644 index 000000000000..797018213718 --- /dev/null +++ b/arch/sh/include/asm/stacktrace.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2009 Matt Fleming + * + * Based on: + * The x86 implementation - arch/x86/include/asm/stacktrace.h + */ +#ifndef _ASM_SH_STACKTRACE_H +#define _ASM_SH_STACKTRACE_H + +/* Generic stack tracer with callbacks */ + +struct stacktrace_ops { + void (*warning)(void *data, char *msg); + /* msg must contain %s for the symbol */ + void (*warning_symbol)(void *data, char *msg, unsigned long symbol); + void (*address)(void *data, unsigned long address, int reliable); + /* On negative return stop dumping */ + int (*stack)(void *data, char *name); +}; + +void dump_trace(struct task_struct *tsk, struct pt_regs *regs, + unsigned long *stack, + const struct stacktrace_ops *ops, void *data); + +#endif /* _ASM_SH_STACKTRACE_H */ diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index 94ed99b68002..6b32de701a7e 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -9,7 +9,7 @@ ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_ftrace.o = -pg endif -obj-y := debugtraps.o idle.o io.o io_generic.o irq.o \ +obj-y := debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o \ machvec.o process_32.o ptrace_32.o setup.o signal_32.o \ sys_sh.o sys_sh32.o syscalls_32.o time.o topology.o \ traps.o traps_32.o diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c new file mode 100644 index 000000000000..6ab996fc6121 --- /dev/null +++ b/arch/sh/kernel/dumpstack.c @@ -0,0 +1,128 @@ +/* + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs + * Copyright (C) 2009 Matt Fleming + */ +#include +#include +#include + +#include + +void printk_address(unsigned long address, int reliable) +{ + printk(" [<%p>] %s%pS\n", (void *) address, + reliable ? "" : "? 
", (void *) address); +} + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +static void +print_ftrace_graph_addr(unsigned long addr, void *data, + const struct stacktrace_ops *ops, + struct thread_info *tinfo, int *graph) +{ + struct task_struct *task = tinfo->task; + unsigned long ret_addr; + int index = task->curr_ret_stack; + + if (addr != (unsigned long)return_to_handler) + return; + + if (!task->ret_stack || index < *graph) + return; + + index -= *graph; + ret_addr = task->ret_stack[index].ret; + + ops->address(data, ret_addr, 1); + + (*graph)++; +} +#else +static inline void +print_ftrace_graph_addr(unsigned long addr, void *data, + const struct stacktrace_ops *ops, + struct thread_info *tinfo, int *graph) +{ } +#endif + +/* + * Unwind the call stack and pass information to the stacktrace_ops + * functions. + */ +void dump_trace(struct task_struct *task, struct pt_regs *regs, + unsigned long *sp, const struct stacktrace_ops *ops, + void *data) +{ + struct thread_info *context; + int graph = 0; + + context = (struct thread_info *) + ((unsigned long)sp & (~(THREAD_SIZE - 1))); + + while (!kstack_end(sp)) { + unsigned long addr = *sp++; + + if (__kernel_text_address(addr)) { + ops->address(data, addr, 0); + + print_ftrace_graph_addr(addr, data, ops, + context, &graph); + } + } +} +EXPORT_SYMBOL(dump_trace); + + +static void +print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) +{ + printk(data); + print_symbol(msg, symbol); + printk("\n"); +} + +static void print_trace_warning(void *data, char *msg) +{ + printk("%s%s\n", (char *)data, msg); +} + +static int print_trace_stack(void *data, char *name) +{ + printk("%s <%s> ", (char *)data, name); + return 0; +} + +/* + * Print one address/symbol entries per line. + */ +static void print_trace_address(void *data, unsigned long addr, int reliable) +{ + printk(data); + printk_address(addr, reliable); +} + +static const struct stacktrace_ops print_trace_ops = { + .warning = print_trace_warning, + .warning_symbol = print_trace_warning_symbol, + .stack = print_trace_stack, + .address = print_trace_address, +}; + +void show_trace(struct task_struct *tsk, unsigned long *sp, + struct pt_regs *regs) +{ + if (regs && user_mode(regs)) + return; + + printk("\nCall trace:\n"); + + dump_trace(tsk, regs, sp, &print_trace_ops, ""); + + printk("\n"); + + if (!tsk) + tsk = current; + + debug_show_held_locks(tsk); +} diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c index 1a2a5eb76e41..6c24a400b05e 100644 --- a/arch/sh/kernel/stacktrace.c +++ b/arch/sh/kernel/stacktrace.c @@ -14,46 +14,81 @@ #include #include #include +#include + +static void save_stack_warning(void *data, char *msg) +{ +} + +static void +save_stack_warning_symbol(void *data, char *msg, unsigned long symbol) +{ +} + +static int save_stack_stack(void *data, char *name) +{ + return 0; +} /* * Save stack-backtrace addresses into a stack_trace buffer. 
*/ +static void save_stack_address(void *data, unsigned long addr, int reliable) +{ + struct stack_trace *trace = data; + + if (trace->skip > 0) { + trace->skip--; + return; + } + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = addr; +} + +static const struct stacktrace_ops save_stack_ops = { + .warning = save_stack_warning, + .warning_symbol = save_stack_warning_symbol, + .stack = save_stack_stack, + .address = save_stack_address, +}; + void save_stack_trace(struct stack_trace *trace) { unsigned long *sp = (unsigned long *)current_stack_pointer; - while (!kstack_end(sp)) { - unsigned long addr = *sp++; - - if (__kernel_text_address(addr)) { - if (trace->skip > 0) - trace->skip--; - else - trace->entries[trace->nr_entries++] = addr; - if (trace->nr_entries >= trace->max_entries) - break; - } - } + dump_trace(current, NULL, sp, &save_stack_ops, trace); } EXPORT_SYMBOL_GPL(save_stack_trace); +static void +save_stack_address_nosched(void *data, unsigned long addr, int reliable) +{ + struct stack_trace *trace = (struct stack_trace *)data; + + if (in_sched_functions(addr)) + return; + + if (trace->skip > 0) { + trace->skip--; + return; + } + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = addr; +} + +static const struct stacktrace_ops save_stack_ops_nosched = { + .warning = save_stack_warning, + .warning_symbol = save_stack_warning_symbol, + .stack = save_stack_stack, + .address = save_stack_address_nosched, +}; + void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { unsigned long *sp = (unsigned long *)tsk->thread.sp; - while (!kstack_end(sp)) { - unsigned long addr = *sp++; - - if (__kernel_text_address(addr)) { - if (in_sched_functions(addr)) - break; - if (trace->skip > 0) - trace->skip--; - else - trace->entries[trace->nr_entries++] = addr; - if (trace->nr_entries >= trace->max_entries) - break; - } - } + dump_trace(current, NULL, sp, &save_stack_ops_nosched, trace); } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 2b772776fcda..563426487c6b 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -858,30 +858,6 @@ void __init trap_init(void) per_cpu_trap_init(); } -void show_trace(struct task_struct *tsk, unsigned long *sp, - struct pt_regs *regs) -{ - unsigned long addr; - - if (regs && user_mode(regs)) - return; - - printk("\nCall trace:\n"); - - while (!kstack_end(sp)) { - addr = *sp++; - if (kernel_text_address(addr)) - print_ip_sym(addr); - } - - printk("\n"); - - if (!tsk) - tsk = current; - - debug_show_held_locks(tsk); -} - void show_stack(struct task_struct *tsk, unsigned long *sp) { unsigned long stack; diff --git a/arch/sh/oprofile/backtrace.c b/arch/sh/oprofile/backtrace.c index 9499a2914f89..62e4e4d0273e 100644 --- a/arch/sh/oprofile/backtrace.c +++ b/arch/sh/oprofile/backtrace.c @@ -20,6 +20,39 @@ #include #include #include +#include + +static void backtrace_warning_symbol(void *data, char *msg, + unsigned long symbol) +{ + /* Ignore warnings */ +} + +static void backtrace_warning(void *data, char *msg) +{ + /* Ignore warnings */ +} + +static int backtrace_stack(void *data, char *name) +{ + /* Yes, we want all stacks */ + return 0; +} + +static void backtrace_address(void *data, unsigned long addr, int reliable) +{ + unsigned int *depth = data; + + if ((*depth)--) + oprofile_add_trace(addr); +} + +static struct stacktrace_ops backtrace_ops = { + .warning = backtrace_warning, + .warning_symbol 
= backtrace_warning_symbol, + .stack = backtrace_stack, + .address = backtrace_address, +}; /* Limit to stop backtracing too far. */ static int backtrace_limit = 20; @@ -74,23 +107,6 @@ static int valid_kernel_stack(unsigned long *stackaddr, struct pt_regs *regs) return ((unsigned long)stackaddr > stack) && ((unsigned long)stackaddr < stack_base); } -static unsigned long * -kernel_backtrace(unsigned long *stackaddr, struct pt_regs *regs) -{ - unsigned long addr; - - /* - * If not a valid kernel address, keep going till we find one - * or the SP stops being a valid address. - */ - do { - addr = *stackaddr++; - oprofile_add_trace(addr); - } while (valid_kernel_stack(stackaddr, regs)); - - return stackaddr; -} - void sh_backtrace(struct pt_regs * const regs, unsigned int depth) { unsigned long *stackaddr; @@ -103,9 +119,9 @@ void sh_backtrace(struct pt_regs * const regs, unsigned int depth) stackaddr = (unsigned long *)regs->regs[15]; if (!user_mode(regs)) { - while (depth-- && valid_kernel_stack(stackaddr, regs)) - stackaddr = kernel_backtrace(stackaddr, regs); - + if (depth) + dump_trace(NULL, regs, stackaddr, + &backtrace_ops, &depth); return; } -- cgit v1.2.3-59-g8ed1b From bf61ad1f870be88676a07bfef69acd59ce10172e Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Thu, 13 Aug 2009 19:49:03 +0900 Subject: sh: Allow multiple stack unwinders to be setup Provide an interface for registering stack unwinders, where each unwinder is given a rating that describes its accuracy and complexity. The more accurate an unwinder is, the more complex it is. If the current stack unwinder faults, then the stack unwinder with the next highest accuracy will be used in its place (provided one is available). For example, this allows unwinders, such as the DWARF unwinder, to liberally sprinkle BUG()s to catch badly formed DWARF debug info. 
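A sketch of the intended usage (the names below are invented for illustration; the interface itself is what the diff adds):

	static void example_dump(struct task_struct *task, struct pt_regs *regs,
				 unsigned long *sp,
				 const struct stacktrace_ops *ops, void *data)
	{
		/* walk the stack, reporting each return address via ops->address() */
	}

	static struct unwinder example_unwinder = {
		.name	= "example-unwinder",
		.rating	= 100,	/* more accurate unwinders get higher ratings */
		.dump	= example_dump,
	};

	static int __init example_unwinder_init(void)
	{
		/* -EBUSY if this unwinder is already on the list, 0 otherwise */
		return unwinder_register(&example_unwinder);
	}

unwind_stack() then always dispatches to the highest-rated registered unwinder, dropping down to a lower-rated one if the current unwinder itself faults.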
Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/unwinder.h | 25 +++++++ arch/sh/kernel/Makefile_32 | 4 +- arch/sh/kernel/Makefile_64 | 2 +- arch/sh/kernel/unwinder.c | 162 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 190 insertions(+), 3 deletions(-) create mode 100644 arch/sh/include/asm/unwinder.h create mode 100644 arch/sh/kernel/unwinder.c (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/unwinder.h b/arch/sh/include/asm/unwinder.h new file mode 100644 index 000000000000..3dc551453e28 --- /dev/null +++ b/arch/sh/include/asm/unwinder.h @@ -0,0 +1,25 @@ +#ifndef _LINUX_UNWINDER_H +#define _LINUX_UNWINDER_H + +#include + +struct unwinder { + const char *name; + struct list_head list; + int rating; + void (*dump)(struct task_struct *, struct pt_regs *, + unsigned long *, const struct stacktrace_ops *, void *); +}; + +extern int unwinder_init(void); +extern int unwinder_register(struct unwinder *); + +extern void unwind_stack(struct task_struct *, struct pt_regs *, + unsigned long *, const struct stacktrace_ops *, + void *); + +extern void stack_reader_dump(struct task_struct *, struct pt_regs *, + unsigned long *, const struct stacktrace_ops *, + void *); + +#endif /* _LINUX_UNWINDER_H */ diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index 6b32de701a7e..37a3b7704fc6 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -11,8 +11,8 @@ endif obj-y := debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o \ machvec.o process_32.o ptrace_32.o setup.o signal_32.o \ - sys_sh.o sys_sh32.o syscalls_32.o time.o topology.o \ - traps.o traps_32.o + sys_sh.o sys_sh32.o syscalls_32.o time.o topology.o \ + traps.o traps_32.o unwinder.o obj-y += cpu/ obj-$(CONFIG_VSYSCALL) += vsyscall/ diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64 index 67b9f6c6326b..00b73e7598cb 100644 --- a/arch/sh/kernel/Makefile_64 +++ b/arch/sh/kernel/Makefile_64 @@ -2,7 +2,7 @@ extra-y := head_64.o init_task.o vmlinux.lds obj-y := debugtraps.o idle.o io.o io_generic.o irq.o machvec.o process_64.o \ ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \ - syscalls_64.o time.o topology.o traps.o traps_64.o + syscalls_64.o time.o topology.o traps.o traps_64.o unwinder.o obj-y += cpu/ obj-$(CONFIG_SMP) += smp.o diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c new file mode 100644 index 000000000000..2b30fa28b440 --- /dev/null +++ b/arch/sh/kernel/unwinder.c @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2009 Matt Fleming + * + * Based, in part, on kernel/time/clocksource.c. + * + * This file provides arbitration code for stack unwinders. + * + * Multiple stack unwinders can be available on a system, usually with + * the most accurate unwinder being the currently active one. + */ +#include +#include +#include +#include +#include + +/* + * This is the most basic stack unwinder an architecture can + * provide. For architectures without reliable frame pointers, e.g. + * RISC CPUs, it can be implemented by looking through the stack for + * addresses that lie within the kernel text section. + * + * Other CPUs, e.g. x86, can use their frame pointer register to + * construct more accurate stack traces. + */ +static struct list_head unwinder_list; +static struct unwinder stack_reader = { + .name = "stack-reader", + .dump = stack_reader_dump, + .rating = 50, + .list = { + .next = &unwinder_list, + .prev = &unwinder_list, + }, +}; + +/* + * "curr_unwinder" points to the stack unwinder currently in use. 
This + * is the unwinder with the highest rating. + * + * "unwinder_list" is a linked-list of all available unwinders, sorted + * by rating. + * + * All modifications of "curr_unwinder" and "unwinder_list" must be + * performed whilst holding "unwinder_lock". + */ +static struct unwinder *curr_unwinder = &stack_reader; + +static struct list_head unwinder_list = { + .next = &stack_reader.list, + .prev = &stack_reader.list, +}; + +static DEFINE_SPINLOCK(unwinder_lock); + +static atomic_t unwinder_running = ATOMIC_INIT(0); + +/** + * select_unwinder - Select the best registered stack unwinder. + * + * Private function. Must hold unwinder_lock when called. + * + * Select the stack unwinder with the best rating. This is useful for + * setting up curr_unwinder. + */ +static struct unwinder *select_unwinder(void) +{ + struct unwinder *best; + + if (list_empty(&unwinder_list)) + return NULL; + + best = list_entry(unwinder_list.next, struct unwinder, list); + if (best == curr_unwinder) + return NULL; + + return best; +} + +/* + * Enqueue the stack unwinder sorted by rating. + */ +static int unwinder_enqueue(struct unwinder *ops) +{ + struct list_head *tmp, *entry = &unwinder_list; + + list_for_each(tmp, &unwinder_list) { + struct unwinder *o; + + o = list_entry(tmp, struct unwinder, list); + if (o == ops) + return -EBUSY; + /* Keep track of the place, where to insert */ + if (o->rating >= ops->rating) + entry = tmp; + } + list_add(&ops->list, entry); + + return 0; +} + +/** + * unwinder_register - Used to install new stack unwinder + * @u: unwinder to be registered + * + * Install the new stack unwinder on the unwinder list, which is sorted + * by rating. + * + * Returns -EBUSY if registration fails, zero otherwise. + */ +int unwinder_register(struct unwinder *u) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&unwinder_lock, flags); + ret = unwinder_enqueue(u); + if (!ret) + curr_unwinder = select_unwinder(); + spin_unlock_irqrestore(&unwinder_lock, flags); + + return ret; +} + +/* + * Unwind the call stack and pass information to the stacktrace_ops + * functions. Also handle the case where we need to switch to a new + * stack dumper because the current one faulted unexpectedly. + */ +void unwind_stack(struct task_struct *task, struct pt_regs *regs, + unsigned long *sp, const struct stacktrace_ops *ops, + void *data) +{ + unsigned long flags; + + /* + * The problem with unwinders with high ratings is that they are + * inherently more complicated than the simple ones with lower + * ratings. We are therefore more likely to fault in the + * complicated ones, e.g. hitting BUG()s. If we fault in the + * code for the current stack unwinder we try to downgrade to + * one with a lower rating. + * + * Hopefully this will give us a semi-reliable stacktrace so we + * can diagnose why curr_unwinder->dump() faulted. 
+ */ + if (atomic_inc_return(&unwinder_running) != 1) { + spin_lock_irqsave(&unwinder_lock, flags); + + if (!list_is_singular(&unwinder_list)) { + list_del(&curr_unwinder->list); + curr_unwinder = select_unwinder(); + } + + spin_unlock_irqrestore(&unwinder_lock, flags); + atomic_dec(&unwinder_running); + } + + curr_unwinder->dump(task, regs, sp, ops, data); + + atomic_dec(&unwinder_running); +} -- cgit v1.2.3-59-g8ed1b From 0eff9f66de79a0707a9c3a2f8528ccfd62100f0b Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 11 Aug 2009 22:43:20 +0100 Subject: sh: Use the new stack unwinder API Instead of implementing our own stack unwinder via dump_trace() we should use the new stack unwinder API because it is more modular. This change allows us to decouple the interface for generating stacktraces from the implementation of a stack unwinder. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/dumpstack.c | 17 ++++++----------- arch/sh/kernel/stacktrace.c | 5 +++-- arch/sh/oprofile/backtrace.c | 5 +++-- 3 files changed, 12 insertions(+), 15 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c index 6ab996fc6121..005dc1d11460 100644 --- a/arch/sh/kernel/dumpstack.c +++ b/arch/sh/kernel/dumpstack.c @@ -6,7 +6,7 @@ #include #include #include - +#include #include void printk_address(unsigned long address, int reliable) @@ -46,13 +46,10 @@ print_ftrace_graph_addr(unsigned long addr, void *data, { } #endif -/* - * Unwind the call stack and pass information to the stacktrace_ops - * functions. - */ -void dump_trace(struct task_struct *task, struct pt_regs *regs, - unsigned long *sp, const struct stacktrace_ops *ops, - void *data) +void +stack_reader_dump(struct task_struct *task, struct pt_regs *regs, + unsigned long *sp, const struct stacktrace_ops *ops, + void *data) { struct thread_info *context; int graph = 0; @@ -71,8 +68,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, } } } -EXPORT_SYMBOL(dump_trace); - static void print_trace_warning_symbol(void *data, char *msg, unsigned long symbol) @@ -117,7 +112,7 @@ void show_trace(struct task_struct *tsk, unsigned long *sp, printk("\nCall trace:\n"); - dump_trace(tsk, regs, sp, &print_trace_ops, ""); + unwind_stack(tsk, regs, sp, &print_trace_ops, ""); printk("\n"); diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c index 6c24a400b05e..45b1adde3abe 100644 --- a/arch/sh/kernel/stacktrace.c +++ b/arch/sh/kernel/stacktrace.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -57,7 +58,7 @@ void save_stack_trace(struct stack_trace *trace) { unsigned long *sp = (unsigned long *)current_stack_pointer; - dump_trace(current, NULL, sp, &save_stack_ops, trace); + unwind_stack(current, NULL, sp, &save_stack_ops, trace); } EXPORT_SYMBOL_GPL(save_stack_trace); @@ -89,6 +90,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { unsigned long *sp = (unsigned long *)tsk->thread.sp; - dump_trace(current, NULL, sp, &save_stack_ops_nosched, trace); + unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace); } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/sh/oprofile/backtrace.c b/arch/sh/oprofile/backtrace.c index 62e4e4d0273e..37cd75d7930e 100644 --- a/arch/sh/oprofile/backtrace.c +++ b/arch/sh/oprofile/backtrace.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -120,8 +121,8 @@ void sh_backtrace(struct pt_regs * const regs, unsigned int depth) 
stackaddr = (unsigned long *)regs->regs[15]; if (!user_mode(regs)) { if (depth) - dump_trace(NULL, regs, stackaddr, - &backtrace_ops, &depth); + unwind_stack(NULL, regs, stackaddr, + &backtrace_ops, &depth); return; } -- cgit v1.2.3-59-g8ed1b From bd353861c735b2265c9d8b2559960c693e7c68ab Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Fri, 14 Aug 2009 01:58:43 +0900 Subject: sh: dwarf unwinder support. This is a first cut at a generic DWARF unwinder for the kernel. It's still lacking DWARF64 support and the DWARF expression support hasn't been tested very well but it is generating proper stacktraces on SH for WARN_ON() and NULL dereferences. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/Kconfig.debug | 8 + arch/sh/Makefile | 4 + arch/sh/include/asm/dwarf.h | 407 ++++++++++++++++++ arch/sh/include/asm/sections.h | 1 + arch/sh/include/asm/vmlinux.lds.h | 17 + arch/sh/kernel/Makefile_32 | 1 + arch/sh/kernel/Makefile_64 | 1 + arch/sh/kernel/dwarf.c | 876 ++++++++++++++++++++++++++++++++++++++ arch/sh/kernel/irq.c | 4 + arch/sh/kernel/vmlinux.lds.S | 4 +- 10 files changed, 1322 insertions(+), 1 deletion(-) create mode 100644 arch/sh/include/asm/dwarf.h create mode 100644 arch/sh/include/asm/vmlinux.lds.h create mode 100644 arch/sh/kernel/dwarf.c (limited to 'arch/sh/kernel') diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index 763b792b1611..741d20fab2e1 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -110,6 +110,14 @@ config DUMP_CODE Those looking for more verbose debugging output should say Y. +config DWARF_UNWINDER + bool "Enable the DWARF unwinder for stacktraces" + select FRAME_POINTER + default n + help + Enabling this option will make stacktraces more accurate, at + the cost of an increase in overall kernel size. + config SH_NO_BSS_INIT bool "Avoid zeroing BSS (to speed-up startup on suitable platforms)" depends on DEBUG_KERNEL diff --git a/arch/sh/Makefile b/arch/sh/Makefile index b6ff337fd856..e26421bf9976 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile @@ -191,6 +191,10 @@ ifeq ($(CONFIG_MCOUNT),y) KBUILD_CFLAGS += -pg endif +ifeq ($(CONFIG_DWARF_UNWINDER),y) + KBUILD_CFLAGS += -fasynchronous-unwind-tables +endif + libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y) libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y) diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h new file mode 100644 index 000000000000..23eba12880b3 --- /dev/null +++ b/arch/sh/include/asm/dwarf.h @@ -0,0 +1,407 @@ +/* + * Copyright (C) 2009 Matt Fleming + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + */ +#ifndef __ASM_SH_DWARF_H +#define __ASM_SH_DWARF_H + +#ifdef CONFIG_DWARF_UNWINDER + +/* + * DWARF expression operations + */ +#define DW_OP_addr 0x03 +#define DW_OP_deref 0x06 +#define DW_OP_const1u 0x08 +#define DW_OP_const1s 0x09 +#define DW_OP_const2u 0x0a +#define DW_OP_const2s 0x0b +#define DW_OP_const4u 0x0c +#define DW_OP_const4s 0x0d +#define DW_OP_const8u 0x0e +#define DW_OP_const8s 0x0f +#define DW_OP_constu 0x10 +#define DW_OP_consts 0x11 +#define DW_OP_dup 0x12 +#define DW_OP_drop 0x13 +#define DW_OP_over 0x14 +#define DW_OP_pick 0x15 +#define DW_OP_swap 0x16 +#define DW_OP_rot 0x17 +#define DW_OP_xderef 0x18 +#define DW_OP_abs 0x19 +#define DW_OP_and 0x1a +#define DW_OP_div 0x1b +#define DW_OP_minus 0x1c +#define DW_OP_mod 0x1d +#define DW_OP_mul 0x1e +#define DW_OP_neg 0x1f +#define DW_OP_not 0x20 +#define DW_OP_or 0x21 +#define DW_OP_plus 0x22 +#define DW_OP_plus_uconst 0x23 +#define DW_OP_shl 0x24 +#define DW_OP_shr 0x25 +#define DW_OP_shra 0x26 +#define DW_OP_xor 0x27 +#define DW_OP_skip 0x2f +#define DW_OP_bra 0x28 +#define DW_OP_eq 0x29 +#define DW_OP_ge 0x2a +#define DW_OP_gt 0x2b +#define DW_OP_le 0x2c +#define DW_OP_lt 0x2d +#define DW_OP_ne 0x2e +#define DW_OP_lit0 0x30 +#define DW_OP_lit1 0x31 +#define DW_OP_lit2 0x32 +#define DW_OP_lit3 0x33 +#define DW_OP_lit4 0x34 +#define DW_OP_lit5 0x35 +#define DW_OP_lit6 0x36 +#define DW_OP_lit7 0x37 +#define DW_OP_lit8 0x38 +#define DW_OP_lit9 0x39 +#define DW_OP_lit10 0x3a +#define DW_OP_lit11 0x3b +#define DW_OP_lit12 0x3c +#define DW_OP_lit13 0x3d +#define DW_OP_lit14 0x3e +#define DW_OP_lit15 0x3f +#define DW_OP_lit16 0x40 +#define DW_OP_lit17 0x41 +#define DW_OP_lit18 0x42 +#define DW_OP_lit19 0x43 +#define DW_OP_lit20 0x44 +#define DW_OP_lit21 0x45 +#define DW_OP_lit22 0x46 +#define DW_OP_lit23 0x47 +#define DW_OP_lit24 0x48 +#define DW_OP_lit25 0x49 +#define DW_OP_lit26 0x4a +#define DW_OP_lit27 0x4b +#define DW_OP_lit28 0x4c +#define DW_OP_lit29 0x4d +#define DW_OP_lit30 0x4e +#define DW_OP_lit31 0x4f +#define DW_OP_reg0 0x50 +#define DW_OP_reg1 0x51 +#define DW_OP_reg2 0x52 +#define DW_OP_reg3 0x53 +#define DW_OP_reg4 0x54 +#define DW_OP_reg5 0x55 +#define DW_OP_reg6 0x56 +#define DW_OP_reg7 0x57 +#define DW_OP_reg8 0x58 +#define DW_OP_reg9 0x59 +#define DW_OP_reg10 0x5a +#define DW_OP_reg11 0x5b +#define DW_OP_reg12 0x5c +#define DW_OP_reg13 0x5d +#define DW_OP_reg14 0x5e +#define DW_OP_reg15 0x5f +#define DW_OP_reg16 0x60 +#define DW_OP_reg17 0x61 +#define DW_OP_reg18 0x62 +#define DW_OP_reg19 0x63 +#define DW_OP_reg20 0x64 +#define DW_OP_reg21 0x65 +#define DW_OP_reg22 0x66 +#define DW_OP_reg23 0x67 +#define DW_OP_reg24 0x68 +#define DW_OP_reg25 0x69 +#define DW_OP_reg26 0x6a +#define DW_OP_reg27 0x6b +#define DW_OP_reg28 0x6c +#define DW_OP_reg29 0x6d +#define DW_OP_reg30 0x6e +#define DW_OP_reg31 0x6f +#define DW_OP_breg0 0x70 +#define DW_OP_breg1 0x71 +#define DW_OP_breg2 0x72 +#define DW_OP_breg3 0x73 +#define DW_OP_breg4 0x74 +#define DW_OP_breg5 0x75 +#define DW_OP_breg6 0x76 +#define DW_OP_breg7 0x77 +#define DW_OP_breg8 0x78 +#define DW_OP_breg9 0x79 +#define DW_OP_breg10 0x7a +#define DW_OP_breg11 0x7b +#define DW_OP_breg12 0x7c +#define DW_OP_breg13 0x7d +#define DW_OP_breg14 0x7e +#define DW_OP_breg15 0x7f +#define DW_OP_breg16 0x80 +#define DW_OP_breg17 0x81 +#define DW_OP_breg18 0x82 +#define DW_OP_breg19 0x83 +#define DW_OP_breg20 0x84 +#define DW_OP_breg21 0x85 +#define DW_OP_breg22 0x86 +#define DW_OP_breg23 0x87 +#define DW_OP_breg24 0x88 +#define DW_OP_breg25 0x89 +#define 
DW_OP_breg26 0x8a +#define DW_OP_breg27 0x8b +#define DW_OP_breg28 0x8c +#define DW_OP_breg29 0x8d +#define DW_OP_breg30 0x8e +#define DW_OP_breg31 0x8f +#define DW_OP_regx 0x90 +#define DW_OP_fbreg 0x91 +#define DW_OP_bregx 0x92 +#define DW_OP_piece 0x93 +#define DW_OP_deref_size 0x94 +#define DW_OP_xderef_size 0x95 +#define DW_OP_nop 0x96 +#define DW_OP_push_object_address 0x97 +#define DW_OP_call2 0x98 +#define DW_OP_call4 0x99 +#define DW_OP_call_ref 0x9a +#define DW_OP_form_tls_address 0x9b +#define DW_OP_call_frame_cfa 0x9c +#define DW_OP_bit_piece 0x9d +#define DW_OP_lo_user 0xe0 +#define DW_OP_hi_user 0xff + +/* + * Addresses used in FDE entries in the .eh_frame section may be encoded + * using one of the following encodings. + */ +#define DW_EH_PE_absptr 0x00 +#define DW_EH_PE_omit 0xff +#define DW_EH_PE_uleb128 0x01 +#define DW_EH_PE_udata2 0x02 +#define DW_EH_PE_udata4 0x03 +#define DW_EH_PE_udata8 0x04 +#define DW_EH_PE_sleb128 0x09 +#define DW_EH_PE_sdata2 0x0a +#define DW_EH_PE_sdata4 0x0b +#define DW_EH_PE_sdata8 0x0c +#define DW_EH_PE_signed 0x09 + +#define DW_EH_PE_pcrel 0x10 + +/* + * The architecture-specific register number that contains the return + * address in the .debug_frame table. + */ +#define DWARF_ARCH_RA_REG 17 + +/* + * At what offset into dwarf_unwind_stack() is DWARF_ARCH_RA_REG setup? + */ +#define DWARF_ARCH_UNWIND_OFFSET 0x20 + +#ifndef __ASSEMBLY__ +/* + * Read either the frame pointer (r14) or the stack pointer (r15). + * NOTE: this MUST be inlined. + */ +static __always_inline unsigned long dwarf_read_arch_reg(unsigned int reg) +{ + unsigned long value; + + switch (reg) { + case 14: + __asm__ __volatile__("mov r14, %0\n" : "=r" (value)); + break; + case 15: + __asm__ __volatile__("mov r15, %0\n" : "=r" (value)); + break; + default: + BUG(); + } + + return value; +} + +/** + * dwarf_cie - Common Information Entry + */ +struct dwarf_cie { + unsigned long length; + unsigned long cie_id; + unsigned char version; + const char *augmentation; + unsigned int code_alignment_factor; + int data_alignment_factor; + + /* Which column in the rule table represents return addr of func. */ + unsigned int return_address_reg; + + unsigned char *initial_instructions; + unsigned char *instructions_end; + + unsigned char encoding; + + unsigned long cie_pointer; + + struct list_head link; + + unsigned long flags; +#define DWARF_CIE_Z_AUGMENTATION (1 << 0) +}; + +/** + * dwarf_fde - Frame Description Entry + */ +struct dwarf_fde { + unsigned long length; + unsigned long cie_pointer; + struct dwarf_cie *cie; + unsigned long initial_location; + unsigned long address_range; + unsigned char *instructions; + unsigned char *end; + struct list_head link; +}; + +/** + * dwarf_frame - DWARF information for a frame in the call stack + */ +struct dwarf_frame { + struct dwarf_frame *prev, *next; + + unsigned long pc; + + struct dwarf_reg *regs; + unsigned int num_regs; /* how many regs are allocated? */ + + unsigned int depth; /* what level are we in the callstack? 
*/ + + unsigned long cfa; + + /* Valid when DW_FRAME_CFA_REG_OFFSET is set in flags */ + unsigned int cfa_register; + unsigned int cfa_offset; + + /* Valid when DW_FRAME_CFA_REG_EXP is set in flags */ + unsigned char *cfa_expr; + unsigned int cfa_expr_len; + + unsigned long flags; +#define DWARF_FRAME_CFA_REG_OFFSET (1 << 0) +#define DWARF_FRAME_CFA_REG_EXP (1 << 1) + + unsigned long return_addr; +}; + +/** + * dwarf_reg - DWARF register + * @flags: Describes how to calculate the value of this register + */ +struct dwarf_reg { + unsigned long addr; + unsigned long flags; +#define DWARF_REG_OFFSET (1 << 0) +}; + +/** + * dwarf_stack - a DWARF stack contains a collection of DWARF frames + * @depth: the number of frames in the stack + * @level: an array of DWARF frames, indexed by stack level + * + */ +struct dwarf_stack { + unsigned int depth; + struct dwarf_frame **level; +}; + +/* + * Call Frame instruction opcodes. + */ +#define DW_CFA_advance_loc 0x40 +#define DW_CFA_offset 0x80 +#define DW_CFA_restore 0xc0 +#define DW_CFA_nop 0x00 +#define DW_CFA_set_loc 0x01 +#define DW_CFA_advance_loc1 0x02 +#define DW_CFA_advance_loc2 0x03 +#define DW_CFA_advance_loc4 0x04 +#define DW_CFA_offset_extended 0x05 +#define DW_CFA_restore_extended 0x06 +#define DW_CFA_undefined 0x07 +#define DW_CFA_same_value 0x08 +#define DW_CFA_register 0x09 +#define DW_CFA_remember_state 0x0a +#define DW_CFA_restore_state 0x0b +#define DW_CFA_def_cfa 0x0c +#define DW_CFA_def_cfa_register 0x0d +#define DW_CFA_def_cfa_offset 0x0e +#define DW_CFA_def_cfa_expression 0x0f +#define DW_CFA_expression 0x10 +#define DW_CFA_offset_extended_sf 0x11 +#define DW_CFA_def_cfa_sf 0x12 +#define DW_CFA_def_cfa_offset_sf 0x13 +#define DW_CFA_val_offset 0x14 +#define DW_CFA_val_offset_sf 0x15 +#define DW_CFA_val_expression 0x16 +#define DW_CFA_lo_user 0x1c +#define DW_CFA_hi_user 0x3f + +/* + * Some call frame instructions encode their operands in the opcode. We + * need some helper functions to extract both the opcode and operands + * from an instruction. + */ +static inline unsigned int DW_CFA_opcode(unsigned long insn) +{ + return (insn & 0xc0); +} + +static inline unsigned int DW_CFA_operand(unsigned long insn) +{ + return (insn & 0x3f); +} + +#define DW_EH_FRAME_CIE 0 /* .eh_frame CIE IDs are 0 */ +#define DW_CIE_ID 0xffffffff +#define DW64_CIE_ID 0xffffffffffffffffULL + +/* + * DWARF FDE/CIE length field values. + */ +#define DW_EXT_LO 0xfffffff0 +#define DW_EXT_HI 0xffffffff +#define DW_EXT_DWARF64 DW_EXT_HI + +extern void dwarf_unwinder_init(void); + +extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, + struct dwarf_frame *); +#endif /* __ASSEMBLY__ */ + +#define CFI_STARTPROC .cfi_startproc +#define CFI_ENDPROC .cfi_endproc +#define CFI_DEF_CFA .cfi_def_cfa +#define CFI_REGISTER .cfi_register +#define CFI_REL_OFFSET .cfi_rel_offset + +#else + +/* + * Use the asm comment character to ignore the rest of the line. + */ +#define CFI_IGNORE ! 
+ +#define CFI_STARTPROC CFI_IGNORE +#define CFI_ENDPROC CFI_IGNORE +#define CFI_DEF_CFA CFI_IGNORE +#define CFI_REGISTER CFI_IGNORE +#define CFI_REL_OFFSET CFI_IGNORE + +#ifndef __ASSEMBLY__ +static inline void dwarf_unwinder_init(void) +{ +} +#endif + +#endif /* CONFIG_DWARF_UNWINDER */ + +#endif /* __ASM_SH_DWARF_H */ diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h index 01a4076a3719..a78701da775b 100644 --- a/arch/sh/include/asm/sections.h +++ b/arch/sh/include/asm/sections.h @@ -7,6 +7,7 @@ extern void __nosave_begin, __nosave_end; extern long __machvec_start, __machvec_end; extern char __uncached_start, __uncached_end; extern char _ebss[]; +extern char __start_eh_frame[], __stop_eh_frame[]; #endif /* __ASM_SH_SECTIONS_H */ diff --git a/arch/sh/include/asm/vmlinux.lds.h b/arch/sh/include/asm/vmlinux.lds.h new file mode 100644 index 000000000000..244ec4ad9a79 --- /dev/null +++ b/arch/sh/include/asm/vmlinux.lds.h @@ -0,0 +1,17 @@ +#ifndef __ASM_SH_VMLINUX_LDS_H +#define __ASM_SH_VMLINUX_LDS_H + +#include + +#ifdef CONFIG_DWARF_UNWINDER +#define DWARF_EH_FRAME \ + .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start_eh_frame) = .; \ + *(.eh_frame) \ + VMLINUX_SYMBOL(__stop_eh_frame) = .; \ + } +#else +#define DWARF_EH_FRAME +#endif + +#endif /* __ASM_SH_VMLINUX_LDS_H */ diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index 37a3b7704fc6..f2245ebf0b31 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 @@ -33,6 +33,7 @@ obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_DUMP_CODE) += disassemble.o obj-$(CONFIG_HIBERNATION) += swsusp.o +obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64 index 00b73e7598cb..639ee514266c 100644 --- a/arch/sh/kernel/Makefile_64 +++ b/arch/sh/kernel/Makefile_64 @@ -13,6 +13,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_IO_TRAPPED) += io_trapped.o obj-$(CONFIG_GENERIC_GPIO) += gpio.o +obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c new file mode 100644 index 000000000000..09c6fd7fd05f --- /dev/null +++ b/arch/sh/kernel/dwarf.c @@ -0,0 +1,876 @@ +/* + * Copyright (C) 2009 Matt Fleming + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * This is an implementation of a DWARF unwinder. Its main purpose is + * for generating stacktrace information. Based on the DWARF 3 + * specification from http://www.dwarfstd.org. + * + * TODO: + * - DWARF64 doesn't work. + */ + +/* #define DEBUG */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static LIST_HEAD(dwarf_cie_list); +DEFINE_SPINLOCK(dwarf_cie_lock); + +static LIST_HEAD(dwarf_fde_list); +DEFINE_SPINLOCK(dwarf_fde_lock); + +static struct dwarf_cie *cached_cie; + +/* + * Figure out whether we need to allocate some dwarf registers. If dwarf + * registers have already been allocated then we may need to realloc + * them. "reg" is a register number that we need to be able to access + * after this call. + * + * Register numbers start at zero, therefore we need to allocate space + * for "reg" + 1 registers. 
+ */ +static void dwarf_frame_alloc_regs(struct dwarf_frame *frame, + unsigned int reg) +{ + struct dwarf_reg *regs; + unsigned int num_regs = reg + 1; + size_t new_size; + size_t old_size; + + new_size = num_regs * sizeof(*regs); + old_size = frame->num_regs * sizeof(*regs); + + /* Fast path: don't allocate any regs if we've already got enough. */ + if (frame->num_regs >= num_regs) + return; + + regs = kzalloc(new_size, GFP_KERNEL); + if (!regs) { + printk(KERN_WARNING "Unable to allocate DWARF registers\n"); + /* + * Let's just bomb hard here, we have no way to + * gracefully recover. + */ + BUG(); + } + + if (frame->regs) { + memcpy(regs, frame->regs, old_size); + kfree(frame->regs); + } + + frame->regs = regs; + frame->num_regs = num_regs; +} + +/** + * dwarf_read_addr - read dwarf data + * @src: source address of data + * @dst: destination address to store the data to + * + * Read 'n' bytes from @src, where 'n' is the size of an address on + * the native machine. We return the number of bytes read, which + * should always be 'n'. We also have to be careful when reading + * from @src and writing to @dst, because they can be arbitrarily + * aligned. Return 'n' - the number of bytes read. + */ +static inline int dwarf_read_addr(void *src, void *dst) +{ + u32 val = __get_unaligned_cpu32(src); + __put_unaligned_cpu32(val, dst); + + return sizeof(unsigned long *); +} + +/** + * dwarf_read_uleb128 - read unsigned LEB128 data + * @addr: the address where the ULEB128 data is stored + * @ret: address to store the result + * + * Decode an unsigned LEB128 encoded datum. The algorithm is taken + * from Appendix C of the DWARF 3 spec. For information on the + * encodings refer to section "7.6 - Variable Length Data". Return + * the number of bytes read. + */ +static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret) +{ + unsigned int result; + unsigned char byte; + int shift, count; + + result = 0; + shift = 0; + count = 0; + + while (1) { + byte = __raw_readb(addr); + addr++; + count++; + + result |= (byte & 0x7f) << shift; + shift += 7; + + if (!(byte & 0x80)) + break; + } + + *ret = result; + + return count; +} + +/** + * dwarf_read_leb128 - read signed LEB128 data + * @addr: the address of the LEB128 encoded data + * @ret: address to store the result + * + * Decode signed LEB128 data. The algorithm is taken from Appendix + * C of the DWARF 3 spec. Return the number of bytes read. + */ +static inline unsigned long dwarf_read_leb128(char *addr, int *ret) +{ + unsigned char byte; + int result, shift; + int num_bits; + int count; + + result = 0; + shift = 0; + count = 0; + + while (1) { + byte = __raw_readb(addr); + addr++; + result |= (byte & 0x7f) << shift; + shift += 7; + count++; + + if (!(byte & 0x80)) + break; + } + + /* The number of bits in a signed integer. */ + num_bits = 8 * sizeof(result); + + if ((shift < num_bits) && (byte & 0x40)) + result |= (-1 << shift); + + *ret = result; + + return count; +} + +/** + * dwarf_read_encoded_value - return the decoded value at @addr + * @addr: the address of the encoded value + * @val: where to write the decoded value + * @encoding: the encoding with which we can decode @addr + * + * GCC emits encoded address in the .eh_frame FDE entries. Decode + * the value at @addr using @encoding. The decoded value is written + * to @val and the number of bytes read is returned. 
+ */ +static int dwarf_read_encoded_value(char *addr, unsigned long *val, + char encoding) +{ + unsigned long decoded_addr = 0; + int count = 0; + + switch (encoding & 0x70) { + case DW_EH_PE_absptr: + break; + case DW_EH_PE_pcrel: + decoded_addr = (unsigned long)addr; + break; + default: + pr_debug("encoding=0x%x\n", (encoding & 0x70)); + BUG(); + } + + if ((encoding & 0x07) == 0x00) + encoding |= DW_EH_PE_udata4; + + switch (encoding & 0x0f) { + case DW_EH_PE_sdata4: + case DW_EH_PE_udata4: + count += 4; + decoded_addr += __get_unaligned_cpu32(addr); + __raw_writel(decoded_addr, val); + break; + default: + pr_debug("encoding=0x%x\n", encoding); + BUG(); + } + + return count; +} + +/** + * dwarf_entry_len - return the length of an FDE or CIE + * @addr: the address of the entry + * @len: the length of the entry + * + * Read the initial_length field of the entry and store the size of + * the entry in @len. We return the number of bytes read. Return a + * count of 0 on error. + */ +static inline int dwarf_entry_len(char *addr, unsigned long *len) +{ + u32 initial_len; + int count; + + initial_len = __get_unaligned_cpu32(addr); + count = 4; + + /* + * An initial length field value in the range DW_LEN_EXT_LO - + * DW_LEN_EXT_HI indicates an extension, and should not be + * interpreted as a length. The only extension that we currently + * understand is the use of DWARF64 addresses. + */ + if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) { + /* + * The 64-bit length field immediately follows the + * compulsory 32-bit length field. + */ + if (initial_len == DW_EXT_DWARF64) { + *len = __get_unaligned_cpu64(addr + 4); + count = 12; + } else { + printk(KERN_WARNING "Unknown DWARF extension\n"); + count = 0; + } + } else + *len = initial_len; + + return count; +} + +/** + * dwarf_lookup_cie - locate the cie + * @cie_ptr: pointer to help with lookup + */ +static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr) +{ + struct dwarf_cie *cie, *n; + unsigned long flags; + + spin_lock_irqsave(&dwarf_cie_lock, flags); + + /* + * We've cached the last CIE we looked up because chances are + * that the FDE wants this CIE. + */ + if (cached_cie && cached_cie->cie_pointer == cie_ptr) { + cie = cached_cie; + goto out; + } + + list_for_each_entry_safe(cie, n, &dwarf_cie_list, link) { + if (cie->cie_pointer == cie_ptr) { + cached_cie = cie; + break; + } + } + + /* Couldn't find the entry in the list. */ + if (&cie->link == &dwarf_cie_list) + cie = NULL; +out: + spin_unlock_irqrestore(&dwarf_cie_lock, flags); + return cie; +} + +/** + * dwarf_lookup_fde - locate the FDE that covers pc + * @pc: the program counter + */ +struct dwarf_fde *dwarf_lookup_fde(unsigned long pc) +{ + unsigned long flags; + struct dwarf_fde *fde, *n; + + spin_lock_irqsave(&dwarf_fde_lock, flags); + list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) { + unsigned long start, end; + + start = fde->initial_location; + end = fde->initial_location + fde->address_range; + + if (pc >= start && pc < end) + break; + } + + /* Couldn't find the entry in the list. 
*/ + if (&fde->link == &dwarf_fde_list) + fde = NULL; + + spin_unlock_irqrestore(&dwarf_fde_lock, flags); + + return fde; +} + +/** + * dwarf_cfa_execute_insns - execute instructions to calculate a CFA + * @insn_start: address of the first instruction + * @insn_end: address of the last instruction + * @cie: the CIE for this function + * @fde: the FDE for this function + * @frame: the instructions calculate the CFA for this frame + * @pc: the program counter of the address we're interested in + * + * Execute the Call Frame instruction sequence starting at + * @insn_start and ending at @insn_end. The instructions describe + * how to calculate the Canonical Frame Address of a stackframe. + * Store the results in @frame. + */ +static int dwarf_cfa_execute_insns(unsigned char *insn_start, + unsigned char *insn_end, + struct dwarf_cie *cie, + struct dwarf_fde *fde, + struct dwarf_frame *frame, + unsigned long pc) +{ + unsigned char insn; + unsigned char *current_insn; + unsigned int count, delta, reg, expr_len, offset; + + current_insn = insn_start; + + while (current_insn < insn_end && frame->pc <= pc) { + insn = __raw_readb(current_insn++); + + /* + * Firstly, handle the opcodes that embed their operands + * in the instructions. + */ + switch (DW_CFA_opcode(insn)) { + case DW_CFA_advance_loc: + delta = DW_CFA_operand(insn); + delta *= cie->code_alignment_factor; + frame->pc += delta; + continue; + /* NOTREACHED */ + case DW_CFA_offset: + reg = DW_CFA_operand(insn); + count = dwarf_read_uleb128(current_insn, &offset); + current_insn += count; + offset *= cie->data_alignment_factor; + dwarf_frame_alloc_regs(frame, reg); + frame->regs[reg].addr = offset; + frame->regs[reg].flags |= DWARF_REG_OFFSET; + continue; + /* NOTREACHED */ + case DW_CFA_restore: + reg = DW_CFA_operand(insn); + continue; + /* NOTREACHED */ + } + + /* + * Secondly, handle the opcodes that don't embed their + * operands in the instruction. 
+ */ + switch (insn) { + case DW_CFA_nop: + continue; + case DW_CFA_advance_loc1: + delta = *current_insn++; + frame->pc += delta * cie->code_alignment_factor; + break; + case DW_CFA_advance_loc2: + delta = __get_unaligned_cpu16(current_insn); + current_insn += 2; + frame->pc += delta * cie->code_alignment_factor; + break; + case DW_CFA_advance_loc4: + delta = __get_unaligned_cpu32(current_insn); + current_insn += 4; + frame->pc += delta * cie->code_alignment_factor; + break; + case DW_CFA_offset_extended: + count = dwarf_read_uleb128(current_insn, ®); + current_insn += count; + count = dwarf_read_uleb128(current_insn, &offset); + current_insn += count; + offset *= cie->data_alignment_factor; + break; + case DW_CFA_restore_extended: + count = dwarf_read_uleb128(current_insn, ®); + current_insn += count; + break; + case DW_CFA_undefined: + count = dwarf_read_uleb128(current_insn, ®); + current_insn += count; + break; + case DW_CFA_def_cfa: + count = dwarf_read_uleb128(current_insn, + &frame->cfa_register); + current_insn += count; + count = dwarf_read_uleb128(current_insn, + &frame->cfa_offset); + current_insn += count; + + frame->flags |= DWARF_FRAME_CFA_REG_OFFSET; + break; + case DW_CFA_def_cfa_register: + count = dwarf_read_uleb128(current_insn, + &frame->cfa_register); + current_insn += count; + frame->flags |= DWARF_FRAME_CFA_REG_OFFSET; + break; + case DW_CFA_def_cfa_offset: + count = dwarf_read_uleb128(current_insn, &offset); + current_insn += count; + frame->cfa_offset = offset; + break; + case DW_CFA_def_cfa_expression: + count = dwarf_read_uleb128(current_insn, &expr_len); + current_insn += count; + + frame->cfa_expr = current_insn; + frame->cfa_expr_len = expr_len; + current_insn += expr_len; + + frame->flags |= DWARF_FRAME_CFA_REG_EXP; + break; + case DW_CFA_offset_extended_sf: + count = dwarf_read_uleb128(current_insn, ®); + current_insn += count; + count = dwarf_read_leb128(current_insn, &offset); + current_insn += count; + offset *= cie->data_alignment_factor; + dwarf_frame_alloc_regs(frame, reg); + frame->regs[reg].flags |= DWARF_REG_OFFSET; + frame->regs[reg].addr = offset; + break; + case DW_CFA_val_offset: + count = dwarf_read_uleb128(current_insn, ®); + current_insn += count; + count = dwarf_read_leb128(current_insn, &offset); + offset *= cie->data_alignment_factor; + frame->regs[reg].flags |= DWARF_REG_OFFSET; + frame->regs[reg].addr = offset; + break; + default: + pr_debug("unhandled DWARF instruction 0x%x\n", insn); + break; + } + } + + return 0; +} + +/** + * dwarf_unwind_stack - recursively unwind the stack + * @pc: address of the function to unwind + * @prev: struct dwarf_frame of the previous stackframe on the callstack + * + * Return a struct dwarf_frame representing the most recent frame + * on the callstack. Each of the lower (older) stack frames are + * linked via the "prev" member. + */ +struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, + struct dwarf_frame *prev) +{ + struct dwarf_frame *frame; + struct dwarf_cie *cie; + struct dwarf_fde *fde; + unsigned long addr; + int i, offset; + + /* + * If this is the first invocation of this recursive function we + * need get the contents of a physical register to get the CFA + * in order to begin the virtual unwinding of the stack. + * + * The constant DWARF_ARCH_UNWIND_OFFSET is added to the address of + * this function because the return address register + * (DWARF_ARCH_RA_REG) will probably not be initialised until a + * few instructions into the prologue. 
+ */ + if (!pc && !prev) { + pc = (unsigned long)&dwarf_unwind_stack; + pc += DWARF_ARCH_UNWIND_OFFSET; + } + + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) + return NULL; + + frame->prev = prev; + + fde = dwarf_lookup_fde(pc); + if (!fde) { + /* + * This is our normal exit path - the one that stops the + * recursion. There's two reasons why we might exit + * here, + * + * a) pc has no asscociated DWARF frame info and so + * we don't know how to unwind this frame. This is + * usually the case when we're trying to unwind a + * frame that was called from some assembly code + * that has no DWARF info, e.g. syscalls. + * + * b) the DEBUG info for pc is bogus. There's + * really no way to distinguish this case from the + * case above, which sucks because we could print a + * warning here. + */ + return NULL; + } + + cie = dwarf_lookup_cie(fde->cie_pointer); + + frame->pc = fde->initial_location; + + /* CIE initial instructions */ + dwarf_cfa_execute_insns(cie->initial_instructions, + cie->instructions_end, cie, fde, frame, pc); + + /* FDE instructions */ + dwarf_cfa_execute_insns(fde->instructions, fde->end, cie, + fde, frame, pc); + + /* Calculate the CFA */ + switch (frame->flags) { + case DWARF_FRAME_CFA_REG_OFFSET: + if (prev) { + BUG_ON(!prev->regs[frame->cfa_register].flags); + + addr = prev->cfa; + addr += prev->regs[frame->cfa_register].addr; + frame->cfa = __raw_readl(addr); + + } else { + /* + * Again, this is the first invocation of this + * recurisve function. We need to physically + * read the contents of a register in order to + * get the Canonical Frame Address for this + * function. + */ + frame->cfa = dwarf_read_arch_reg(frame->cfa_register); + } + + frame->cfa += frame->cfa_offset; + break; + default: + BUG(); + } + + /* If we haven't seen the return address reg, we're screwed. */ + BUG_ON(!frame->regs[DWARF_ARCH_RA_REG].flags); + + for (i = 0; i <= frame->num_regs; i++) { + struct dwarf_reg *reg = &frame->regs[i]; + + if (!reg->flags) + continue; + + offset = reg->addr; + offset += frame->cfa; + } + + addr = frame->cfa + frame->regs[DWARF_ARCH_RA_REG].addr; + frame->return_addr = __raw_readl(addr); + + frame->next = dwarf_unwind_stack(frame->return_addr, frame); + return frame; +} + +static int dwarf_parse_cie(void *entry, void *p, unsigned long len, + unsigned char *end) +{ + struct dwarf_cie *cie; + unsigned long flags; + int count; + + cie = kzalloc(sizeof(*cie), GFP_KERNEL); + if (!cie) + return -ENOMEM; + + cie->length = len; + + /* + * Record the offset into the .eh_frame section + * for this CIE. It allows this CIE to be + * quickly and easily looked up from the + * corresponding FDE. + */ + cie->cie_pointer = (unsigned long)entry; + + cie->version = *(char *)p++; + BUG_ON(cie->version != 1); + + cie->augmentation = p; + p += strlen(cie->augmentation) + 1; + + count = dwarf_read_uleb128(p, &cie->code_alignment_factor); + p += count; + + count = dwarf_read_leb128(p, &cie->data_alignment_factor); + p += count; + + /* + * Which column in the rule table contains the + * return address? 
+ */ + if (cie->version == 1) { + cie->return_address_reg = __raw_readb(p); + p++; + } else { + count = dwarf_read_uleb128(p, &cie->return_address_reg); + p += count; + } + + if (cie->augmentation[0] == 'z') { + unsigned int length, count; + cie->flags |= DWARF_CIE_Z_AUGMENTATION; + + count = dwarf_read_uleb128(p, &length); + p += count; + + BUG_ON((unsigned char *)p > end); + + cie->initial_instructions = p + length; + cie->augmentation++; + } + + while (*cie->augmentation) { + /* + * "L" indicates a byte showing how the + * LSDA pointer is encoded. Skip it. + */ + if (*cie->augmentation == 'L') { + p++; + cie->augmentation++; + } else if (*cie->augmentation == 'R') { + /* + * "R" indicates a byte showing + * how FDE addresses are + * encoded. + */ + cie->encoding = *(char *)p++; + cie->augmentation++; + } else if (*cie->augmentation == 'P') { + /* + * "R" indicates a personality + * routine in the CIE + * augmentation. + */ + BUG(); + } else if (*cie->augmentation == 'S') { + BUG(); + } else { + /* + * Unknown augmentation. Assume + * 'z' augmentation. + */ + p = cie->initial_instructions; + BUG_ON(!p); + break; + } + } + + cie->initial_instructions = p; + cie->instructions_end = end; + + /* Add to list */ + spin_lock_irqsave(&dwarf_cie_lock, flags); + list_add_tail(&cie->link, &dwarf_cie_list); + spin_unlock_irqrestore(&dwarf_cie_lock, flags); + + return 0; +} + +static int dwarf_parse_fde(void *entry, u32 entry_type, + void *start, unsigned long len) +{ + struct dwarf_fde *fde; + struct dwarf_cie *cie; + unsigned long flags; + int count; + void *p = start; + + fde = kzalloc(sizeof(*fde), GFP_KERNEL); + if (!fde) + return -ENOMEM; + + fde->length = len; + + /* + * In a .eh_frame section the CIE pointer is the + * delta between the address within the FDE + */ + fde->cie_pointer = (unsigned long)(p - entry_type - 4); + + cie = dwarf_lookup_cie(fde->cie_pointer); + fde->cie = cie; + + if (cie->encoding) + count = dwarf_read_encoded_value(p, &fde->initial_location, + cie->encoding); + else + count = dwarf_read_addr(p, &fde->initial_location); + + p += count; + + if (cie->encoding) + count = dwarf_read_encoded_value(p, &fde->address_range, + cie->encoding & 0x0f); + else + count = dwarf_read_addr(p, &fde->address_range); + + p += count; + + if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) { + unsigned int length; + count = dwarf_read_uleb128(p, &length); + p += count + length; + } + + /* Call frame instructions. */ + fde->instructions = p; + fde->end = start + len; + + /* Add to list. */ + spin_lock_irqsave(&dwarf_fde_lock, flags); + list_add_tail(&fde->link, &dwarf_fde_list); + spin_unlock_irqrestore(&dwarf_fde_lock, flags); + + return 0; +} + +static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs, + unsigned long *sp, + const struct stacktrace_ops *ops, void *data) +{ + struct dwarf_frame *frame; + + frame = dwarf_unwind_stack(0, NULL); + + while (frame && frame->return_addr) { + ops->address(data, frame->return_addr, 1); + frame = frame->next; + } +} + +static struct unwinder dwarf_unwinder = { + .name = "dwarf-unwinder", + .dump = dwarf_unwinder_dump, + .rating = 150, +}; + +static void dwarf_unwinder_cleanup(void) +{ + struct dwarf_cie *cie, *m; + struct dwarf_fde *fde, *n; + unsigned long flags; + + /* + * Deallocate all the memory allocated for the DWARF unwinder. + * Traverse all the FDE/CIE lists and remove and free all the + * memory associated with those data structures. 
+ */ + spin_lock_irqsave(&dwarf_cie_lock, flags); + list_for_each_entry_safe(cie, m, &dwarf_cie_list, link) + kfree(cie); + spin_unlock_irqrestore(&dwarf_cie_lock, flags); + + spin_lock_irqsave(&dwarf_fde_lock, flags); + list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) + kfree(fde); + spin_unlock_irqrestore(&dwarf_fde_lock, flags); +} + +/** + * dwarf_unwinder_init - initialise the dwarf unwinder + * + * Build the data structures describing the .dwarf_frame section to + * make it easier to lookup CIE and FDE entries. Because the + * .eh_frame section is packed as tightly as possible it is not + * easy to lookup the FDE for a given PC, so we build a list of FDE + * and CIE entries that make it easier. + */ +void dwarf_unwinder_init(void) +{ + u32 entry_type; + void *p, *entry; + int count, err; + unsigned long len; + unsigned int c_entries, f_entries; + unsigned char *end; + INIT_LIST_HEAD(&dwarf_cie_list); + INIT_LIST_HEAD(&dwarf_fde_list); + + c_entries = 0; + f_entries = 0; + entry = &__start_eh_frame; + + while ((char *)entry < __stop_eh_frame) { + p = entry; + + count = dwarf_entry_len(p, &len); + if (count == 0) { + /* + * We read a bogus length field value. There is + * nothing we can do here apart from disabling + * the DWARF unwinder. We can't even skip this + * entry and move to the next one because 'len' + * tells us where our next entry is. + */ + goto out; + } else + p += count; + + /* initial length does not include itself */ + end = p + len; + + entry_type = __get_unaligned_cpu32(p); + p += 4; + + if (entry_type == DW_EH_FRAME_CIE) { + err = dwarf_parse_cie(entry, p, len, end); + if (err < 0) + goto out; + else + c_entries++; + } else { + err = dwarf_parse_fde(entry, entry_type, p, len); + if (err < 0) + goto out; + else + f_entries++; + } + + entry = (char *)entry + len + 4; + } + + printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n", + c_entries, f_entries); + + err = unwinder_register(&dwarf_unwinder); + if (err) + goto out; + + return; + +out: + printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err); + dwarf_unwinder_cleanup(); +} diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 278c68c60488..2bb43dc74f22 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -261,6 +262,9 @@ void __init init_IRQ(void) sh_mv.mv_init_irq(); irq_ctx_init(smp_processor_id()); + + /* This needs to be early, but not too early.. */ + dwarf_unwinder_init(); } #ifdef CONFIG_SPARSE_IRQ diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S index 80dc9f8d9412..1b7d9d541e01 100644 --- a/arch/sh/kernel/vmlinux.lds.S +++ b/arch/sh/kernel/vmlinux.lds.S @@ -12,7 +12,7 @@ OUTPUT_ARCH(sh) #include #include -#include +#include ENTRY(_start) SECTIONS @@ -70,6 +70,8 @@ SECTIONS _edata = .; /* End of data section */ + DWARF_EH_FRAME + . = ALIGN(PAGE_SIZE); /* Init code and data */ __init_begin = .; INIT_TEXT_SECTION(PAGE_SIZE) -- cgit v1.2.3-59-g8ed1b From 0b930489b8606224b829c8a6037eac24249a97ec Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sun, 2 Aug 2009 22:33:26 +0100 Subject: sh: Setup the frame register in asm code In order to use DWARF unwinder info the frame register has to contain a valid value. Whilst GCC takes care of this for C code, we have to do it ourselves for assembly. 
Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/entry-macros.S | 12 ++++++++++++ arch/sh/kernel/cpu/sh3/entry.S | 1 + arch/sh/kernel/entry-common.S | 1 + 3 files changed, 14 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/entry-macros.S b/arch/sh/include/asm/entry-macros.S index 1bdd93891cd7..64fd0de24daf 100644 --- a/arch/sh/include/asm/entry-macros.S +++ b/arch/sh/include/asm/entry-macros.S @@ -108,3 +108,15 @@ #else # define PREF(x) nop #endif + + /* + * Macro for use within assembly. Because the DWARF unwinder + * needs to use the frame register to unwind the stack, we + * need to setup r14 with the value of the stack pointer as + * the return address is usually on the stack somewhere. + */ + .macro setup_frame_reg +#ifdef CONFIG_DWARF_UNWINDER + mov r15, r14 +#endif + .endm diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index 3cb531f233f2..67ad6467c694 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -137,6 +137,7 @@ ENTRY(tlb_protection_violation_store) mov #1, r5 call_dpf: + setup_frame_reg mov.l 1f, r0 mov r5, r8 mov.l @r0, r6 diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index fc26ccd82789..b3b215fb21f7 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -285,6 +285,7 @@ ret_from_fork: * system calls and debug traps through their respective jump tables. */ ENTRY(system_call) + setup_frame_reg #if !defined(CONFIG_CPU_SH2) mov.l 1f, r9 mov.l @r9, r8 ! Read from TRA (Trap Address) Register -- cgit v1.2.3-59-g8ed1b From cafb0ddac60556f7d2d4cd0ef1a93da8a6c71ffb Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sun, 2 Aug 2009 22:40:11 +0100 Subject: sh: Add CFI annotations for exception return. Annotate various assembly code paths with CFI assembler directives so that DWARF unwind info is available for the unwinder. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/entry-common.S | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index b3b215fb21f7..e63178fefb9b 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -43,6 +43,7 @@ * syscall # * */ +#include #if defined(CONFIG_PREEMPT) # define preempt_stop() cli ; TRACE_IRQS_OFF @@ -66,6 +67,11 @@ ENTRY(exception_error) .align 2 ret_from_exception: + CFI_STARTPROC simple + CFI_DEF_CFA r14, 0 + CFI_REL_OFFSET 17, 64 + CFI_REL_OFFSET 15, 0 + CFI_REL_OFFSET 14, 56 preempt_stop() ENTRY(ret_from_irq) ! @@ -240,6 +246,7 @@ debug_trap: nop bra __restore_all nop + CFI_ENDPROC .align 2 1: .long debug_trap_table -- cgit v1.2.3-59-g8ed1b From 3497447f15485b479366ec86effaac16fc82411b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 14 Aug 2009 02:10:59 +0900 Subject: sh: unwinder: Fix up usage of unaligned accessors. This was using internal symbols for unaligned accesses, bypassing the exposed interface for variable sized safe accesses. This converts all of the __get_unaligned_cpuXX() users over to get_unaligned() directly, relying on the cast to select the proper internal routine. Additionally, the __put_unaligned_cpuXX() case is superfluous given that the destination address is aligned in all of the current cases, so just drop that outright. Furthermore, this switches to the asm/unaligned.h header instead of the asm-generic version, which was silently bypassing the SH-4A optimized unaligned ops. 
Signed-off-by: Paul Mundt --- arch/sh/kernel/dwarf.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 09c6fd7fd05f..d1d8536e5ba3 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include @@ -87,11 +87,9 @@ static void dwarf_frame_alloc_regs(struct dwarf_frame *frame, * from @src and writing to @dst, because they can be arbitrarily * aligned. Return 'n' - the number of bytes read. */ -static inline int dwarf_read_addr(void *src, void *dst) +static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst) { - u32 val = __get_unaligned_cpu32(src); - __put_unaligned_cpu32(val, dst); - + *dst = get_unaligned(src); return sizeof(unsigned long *); } @@ -207,7 +205,7 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val, case DW_EH_PE_sdata4: case DW_EH_PE_udata4: count += 4; - decoded_addr += __get_unaligned_cpu32(addr); + decoded_addr += get_unaligned((u32 *)addr); __raw_writel(decoded_addr, val); break; default: @@ -232,7 +230,7 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len) u32 initial_len; int count; - initial_len = __get_unaligned_cpu32(addr); + initial_len = get_unaligned((u32 *)addr); count = 4; /* @@ -247,7 +245,7 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len) * compulsory 32-bit length field. */ if (initial_len == DW_EXT_DWARF64) { - *len = __get_unaligned_cpu64(addr + 4); + *len = get_unaligned((u64 *)addr + 4); count = 12; } else { printk(KERN_WARNING "Unknown DWARF extension\n"); @@ -392,12 +390,12 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, frame->pc += delta * cie->code_alignment_factor; break; case DW_CFA_advance_loc2: - delta = __get_unaligned_cpu16(current_insn); + delta = get_unaligned((u16 *)current_insn); current_insn += 2; frame->pc += delta * cie->code_alignment_factor; break; case DW_CFA_advance_loc4: - delta = __get_unaligned_cpu32(current_insn); + delta = get_unaligned((u32 *)current_insn); current_insn += 4; frame->pc += delta * cie->code_alignment_factor; break; @@ -841,7 +839,7 @@ void dwarf_unwinder_init(void) /* initial length does not include itself */ end = p + len; - entry_type = __get_unaligned_cpu32(p); + entry_type = get_unaligned((u32 *)p); p += 4; if (entry_type == DW_EH_FRAME_CIE) { -- cgit v1.2.3-59-g8ed1b From bf43a160ff2d67a21b076286bab6f5e2c993bd0a Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 14 Aug 2009 03:06:13 +0900 Subject: sh: unwinder: Restore put_unaligned() for an unaligned destination. The destination address might be unaligned, so set it with put_unaligned() for safety. This restores the previous behaviour, albeit through the proper API. 
Signed-off-by: Paul Mundt --- arch/sh/kernel/dwarf.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index d1d8536e5ba3..49d039f19426 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -89,7 +89,8 @@ static void dwarf_frame_alloc_regs(struct dwarf_frame *frame, */ static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst) { - *dst = get_unaligned(src); + u32 val = get_unaligned(src); + put_unaligned(val, dst); return sizeof(unsigned long *); } -- cgit v1.2.3-59-g8ed1b From f826466772ae52f26152287fcb2259351de78f0f Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Thu, 13 Aug 2009 20:41:31 +0100 Subject: sh: Delete DWARF_ARCH_UNWIND_OFFSET Trying to figure out the best value for DWARF_ARCH_UNWIND_OFFSET is tricky at best. Various things can change the size (and offset from the beginning of the function) of the prologue. Notably, turning on ftrace adds calls to mcount at the beginning of functions, thereby pushing the prologue further into the function. So replace DWARF_ARCH_UNWIND_OFFSET with some code that continues to execute CFA instructions until the value of return address register is defined. This is safe to do because we know that the return address must have been pushed onto the frame before our first function call; we just can't figure out where at compile-time. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/dwarf.h | 5 ----- arch/sh/kernel/dwarf.c | 44 +++++++++++++++++++++++++++++++++++--------- 2 files changed, 35 insertions(+), 14 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h index 23eba12880b3..60b180728d8d 100644 --- a/arch/sh/include/asm/dwarf.h +++ b/arch/sh/include/asm/dwarf.h @@ -193,11 +193,6 @@ */ #define DWARF_ARCH_RA_REG 17 -/* - * At what offset into dwarf_unwind_stack() is DWARF_ARCH_RA_REG setup? - */ -#define DWARF_ARCH_UNWIND_OFFSET 0x20 - #ifndef __ASSEMBLY__ /* * Read either the frame pointer (r14) or the stack pointer (r15). diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 49d039f19426..83f3cc92549f 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -330,6 +330,7 @@ struct dwarf_fde *dwarf_lookup_fde(unsigned long pc) * @fde: the FDE for this function * @frame: the instructions calculate the CFA for this frame * @pc: the program counter of the address we're interested in + * @define_ra: keep executing insns until the return addr reg is defined? * * Execute the Call Frame instruction sequence starting at * @insn_start and ending at @insn_end. The instructions describe @@ -341,17 +342,36 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, struct dwarf_cie *cie, struct dwarf_fde *fde, struct dwarf_frame *frame, - unsigned long pc) + unsigned long pc, + bool define_ra) { unsigned char insn; unsigned char *current_insn; unsigned int count, delta, reg, expr_len, offset; + bool seen_ra_reg; current_insn = insn_start; - while (current_insn < insn_end && frame->pc <= pc) { + /* + * If we're executing instructions for the dwarf_unwind_stack() + * FDE we need to keep executing instructions until the value of + * DWARF_ARCH_RA_REG is defined. See the comment in + * dwarf_unwind_stack() for more details. 
+ */ + if (define_ra) + seen_ra_reg = false; + else + seen_ra_reg = true; + + while (current_insn < insn_end && (frame->pc <= pc || !seen_ra_reg) ) { insn = __raw_readb(current_insn++); + if (!seen_ra_reg) { + if (frame->num_regs >= DWARF_ARCH_RA_REG && + frame->regs[DWARF_ARCH_RA_REG].flags) + seen_ra_reg = true; + } + /* * Firstly, handle the opcodes that embed their operands * in the instructions. @@ -490,20 +510,25 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, struct dwarf_fde *fde; unsigned long addr; int i, offset; + bool define_ra = false; /* * If this is the first invocation of this recursive function we * need get the contents of a physical register to get the CFA * in order to begin the virtual unwinding of the stack. * - * The constant DWARF_ARCH_UNWIND_OFFSET is added to the address of - * this function because the return address register - * (DWARF_ARCH_RA_REG) will probably not be initialised until a - * few instructions into the prologue. + * Setting "define_ra" to true indictates that we want + * dwarf_cfa_execute_insns() to continue executing instructions + * until we know how to calculate the value of DWARF_ARCH_RA_REG + * (which we need in order to kick off the whole unwinding + * process). + * + * NOTE: the return address is guaranteed to be setup by the + * time this function makes its first function call. */ if (!pc && !prev) { pc = (unsigned long)&dwarf_unwind_stack; - pc += DWARF_ARCH_UNWIND_OFFSET; + define_ra = true; } frame = kzalloc(sizeof(*frame), GFP_KERNEL); @@ -539,11 +564,12 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, /* CIE initial instructions */ dwarf_cfa_execute_insns(cie->initial_instructions, - cie->instructions_end, cie, fde, frame, pc); + cie->instructions_end, cie, fde, + frame, pc, false); /* FDE instructions */ dwarf_cfa_execute_insns(fde->instructions, fde->end, cie, - fde, frame, pc); + fde, frame, pc, define_ra); /* Calculate the CFA */ switch (frame->flags) { -- cgit v1.2.3-59-g8ed1b From 0fc11e3618bb1f9e0640127ec84f5d2690fa3894 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 14 Aug 2009 23:58:37 +0900 Subject: sh: unwinder: Convert frame allocations to GFP_ATOMIC. save_stack_trace_tsk() and friends can be called from atomic context (as triggered by latencytop), and subsequently hit two problematic allocation points that were using GFP_KERNEL (these were dwarf_unwind_stack() and dwarf_frame_alloc_regs()). Convert these over to GFP_ATOMIC and get latencytop working with the DWARF unwinder. Signed-off-by: Paul Mundt --- arch/sh/kernel/dwarf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 83f3cc92549f..db021361b161 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -57,7 +57,7 @@ static void dwarf_frame_alloc_regs(struct dwarf_frame *frame, if (frame->num_regs >= num_regs) return; - regs = kzalloc(new_size, GFP_KERNEL); + regs = kzalloc(new_size, GFP_ATOMIC); if (!regs) { printk(KERN_WARNING "Unable to allocate DWARF registers\n"); /* @@ -531,7 +531,7 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, define_ra = true; } - frame = kzalloc(sizeof(*frame), GFP_KERNEL); + frame = kzalloc(sizeof(*frame), GFP_ATOMIC); if (!frame) return NULL; -- cgit v1.2.3-59-g8ed1b From 48e4d4604b7da52d6a56ddc821a460b183b7353d Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 15 Aug 2009 01:05:46 +0900 Subject: sh: stacktrace: Add reliability checks in address saving ops. 
This adopts the reliability checks from the x86 stacktrace code so known bad addresses are not recorded in the stack trace buffer. Signed-off-by: Paul Mundt --- arch/sh/kernel/stacktrace.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c index 45b1adde3abe..a0f497bcbb4f 100644 --- a/arch/sh/kernel/stacktrace.c +++ b/arch/sh/kernel/stacktrace.c @@ -38,6 +38,9 @@ static void save_stack_address(void *data, unsigned long addr, int reliable) { struct stack_trace *trace = data; + if (!reliable) + return; + if (trace->skip > 0) { trace->skip--; return; @@ -67,6 +70,9 @@ save_stack_address_nosched(void *data, unsigned long addr, int reliable) { struct stack_trace *trace = (struct stack_trace *)data; + if (!reliable) + return; + if (in_sched_functions(addr)) return; -- cgit v1.2.3-59-g8ed1b From f9967e23c10f025d85bbf66a2a5f18f016890ccb Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 15 Aug 2009 01:09:03 +0900 Subject: sh: flag the default unwinder as reliable. This flags the default unwinder as reliable, as it tends to be reliable enough for the purposes of the stacktrace buffer. We leave the unreliable cases for the unwind methods that we know to be completely broken. Signed-off-by: Paul Mundt --- arch/sh/kernel/dumpstack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c index 005dc1d11460..6f5ad1513409 100644 --- a/arch/sh/kernel/dumpstack.c +++ b/arch/sh/kernel/dumpstack.c @@ -61,7 +61,7 @@ stack_reader_dump(struct task_struct *task, struct pt_regs *regs, unsigned long addr = *sp++; if (__kernel_text_address(addr)) { - ops->address(data, addr, 0); + ops->address(data, addr, 1); print_ftrace_graph_addr(addr, data, ops, context, &graph); -- cgit v1.2.3-59-g8ed1b From 606b4c992f3b28f906100f1b6eb49059909d8da7 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 15 Aug 2009 01:11:37 +0900 Subject: sh: stacktrace: Properly terminate the trace entry buffer. This inserts a ULONG_MAX entry at the end of the valid entries in the stack trace buffer so the default code doesn't need to scan to the end of available slots. This also makes the trace buffer termination behaviour consistent with the other architectures. Signed-off-by: Paul Mundt --- arch/sh/kernel/stacktrace.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c index a0f497bcbb4f..c2e45c48409c 100644 --- a/arch/sh/kernel/stacktrace.c +++ b/arch/sh/kernel/stacktrace.c @@ -62,6 +62,8 @@ void save_stack_trace(struct stack_trace *trace) unsigned long *sp = (unsigned long *)current_stack_pointer; unwind_stack(current, NULL, sp, &save_stack_ops, trace); + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL_GPL(save_stack_trace); @@ -97,5 +99,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) unsigned long *sp = (unsigned long *)tsk->thread.sp; unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace); + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); -- cgit v1.2.3-59-g8ed1b From 112e58471de3431fbd03dee514777ad4a66a77b2 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 15 Aug 2009 02:49:40 +0900 Subject: sh: TLB protection violation exception optimizations. 
This adds a bit of rework to have the TLB protection violations skip the TLB miss fastpath and go directly in to do_page_fault(), as these require slow path handling. Based on an earlier patch by SUGIOKA Toshinobu. Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/entry.S | 30 +++++++++++++++++++----------- arch/sh/mm/fault_32.c | 6 +++--- 2 files changed, 22 insertions(+), 14 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index 3cb531f233f2..bbaf2bd118e7 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -113,34 +113,33 @@ OFF_TRA = (16*4+6*4) #if defined(CONFIG_MMU) .align 2 ENTRY(tlb_miss_load) - bra call_dpf + bra call_handle_tlbmiss mov #0, r5 .align 2 ENTRY(tlb_miss_store) - bra call_dpf + bra call_handle_tlbmiss mov #1, r5 .align 2 ENTRY(initial_page_write) - bra call_dpf + bra call_handle_tlbmiss mov #1, r5 .align 2 ENTRY(tlb_protection_violation_load) - bra call_dpf + bra call_do_page_fault mov #0, r5 .align 2 ENTRY(tlb_protection_violation_store) - bra call_dpf + bra call_do_page_fault mov #1, r5 -call_dpf: +call_handle_tlbmiss: mov.l 1f, r0 mov r5, r8 mov.l @r0, r6 - mov r6, r9 mov.l 2f, r0 sts pr, r10 jsr @r0 @@ -151,16 +150,25 @@ call_dpf: lds r10, pr rts nop -0: mov.l 3f, r0 - mov r9, r6 +0: mov r8, r5 +call_do_page_fault: + mov.l 1f, r0 + mov.l @r0, r6 + + sti + + mov.l 3f, r0 + mov.l 4f, r1 + mov r15, r4 jmp @r0 - mov r15, r4 + lds r1, pr .align 2 1: .long MMU_TEA -2: .long __do_page_fault +2: .long handle_tlbmiss 3: .long do_page_fault +4: .long ret_from_exception .align 2 ENTRY(address_error_load) diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index dbbdeba2cee5..41840647f65f 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -318,9 +318,9 @@ do_sigbus: /* * Called with interrupts disabled. */ -asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, - unsigned long writeaccess, - unsigned long address) +asmlinkage int __kprobes +handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess, + unsigned long address) { pgd_t *pgd; pud_t *pud; -- cgit v1.2.3-59-g8ed1b From 8010fbe7a67c2f993cbb11b9d8b7e98528256dd1 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 15 Aug 2009 03:06:41 +0900 Subject: sh: TLB fast path optimizations for load/store exceptions. This only bothers with the TLB entry flush in the case of the initial page write exception, as it is unecessary in the case of the load/store exceptions. 
Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/entry.S | 2 +- arch/sh/mm/fault_32.c | 26 +++++++++++++------------- 2 files changed, 14 insertions(+), 14 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index bbaf2bd118e7..a701fac8ed42 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -124,7 +124,7 @@ ENTRY(tlb_miss_store) .align 2 ENTRY(initial_page_write) bra call_handle_tlbmiss - mov #1, r5 + mov #2, r5 .align 2 ENTRY(tlb_protection_violation_load) diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 41840647f65f..f1c93c880ed4 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -327,7 +327,6 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess, pmd_t *pmd; pte_t *pte; pte_t entry; - int ret = 1; /* * We don't take page faults for P1, P2, and parts of P4, these @@ -338,40 +337,41 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess, pgd = pgd_offset_k(address); } else { if (unlikely(address >= TASK_SIZE || !current->mm)) - goto out; + return 1; pgd = pgd_offset(current->mm, address); } pud = pud_offset(pgd, address); if (pud_none_or_clear_bad(pud)) - goto out; + return 1; pmd = pmd_offset(pud, address); if (pmd_none_or_clear_bad(pmd)) - goto out; + return 1; pte = pte_offset_kernel(pmd, address); entry = *pte; if (unlikely(pte_none(entry) || pte_not_present(entry))) - goto out; + return 1; if (unlikely(writeaccess && !pte_write(entry))) - goto out; + return 1; if (writeaccess) entry = pte_mkdirty(entry); entry = pte_mkyoung(entry); + set_pte(pte, entry); + #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP) /* - * ITLB is not affected by "ldtlb" instruction. - * So, we need to flush the entry by ourselves. + * SH-4 does not set MMUCR.RC to the corresponding TLB entry in + * the case of an initial page write exception, so we need to + * flush it in order to avoid potential TLB entry duplication. */ - local_flush_tlb_one(get_asid(), address & PAGE_MASK); + if (writeaccess == 2) + local_flush_tlb_one(get_asid(), address & PAGE_MASK); #endif - set_pte(pte, entry); update_mmu_cache(NULL, address, entry); - ret = 0; -out: - return ret; + return 0; } -- cgit v1.2.3-59-g8ed1b From 7a90e00dda0bae66b5232d5a37155f13a0581369 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 15 Aug 2009 07:41:45 +0900 Subject: sh: Make sure rte delay slots are nopped out on all parts. Future SH parts do not support any instruction but a nop in the rte delay slot, so make the change for all offending parts. SH-5 is excluded from this, and already has its own set of restrictions with regards to rte delay slot handling. 
Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh2/entry.S | 3 ++- arch/sh/kernel/cpu/sh2a/entry.S | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S index becc54c45692..c8a4331d9b8d 100644 --- a/arch/sh/kernel/cpu/sh2/entry.S +++ b/arch/sh/kernel/cpu/sh2/entry.S @@ -227,8 +227,9 @@ ENTRY(sh_bios_handler) mov.l @r15+, r14 add #8,r15 lds.l @r15+, pr + mov.l @r15+,r15 rte - mov.l @r15+,r15 + nop .align 2 1: .long gdb_vbr_vector #endif /* CONFIG_SH_STANDARD_BIOS */ diff --git a/arch/sh/kernel/cpu/sh2a/entry.S b/arch/sh/kernel/cpu/sh2a/entry.S index ab3903eeda5c..222742ddc0d6 100644 --- a/arch/sh/kernel/cpu/sh2a/entry.S +++ b/arch/sh/kernel/cpu/sh2a/entry.S @@ -176,8 +176,9 @@ ENTRY(sh_bios_handler) movml.l @r15+,r14 add #8,r15 lds.l @r15+, pr + mov.l @r15+,r15 rte - mov.l @r15+,r15 + nop .align 2 1: .long gdb_vbr_vector #endif /* CONFIG_SH_STANDARD_BIOS */ -- cgit v1.2.3-59-g8ed1b From 7dd6662a92fe9a15ad565045aa60367995cc533d Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 15 Aug 2009 07:43:21 +0900 Subject: sh: delay slot future proofing via EXPMASK on SH-4A parts. This implements EXPMASK initialization code for SH-4A parts, where it is possible to disable compat features that will go away in newer cores. Presently this includes disabling support for non-nop instructions in the rte delay slot, as well as a sleep instruction being placed in a delay slot (neither of which the kernel does any longer). As a result of this, any future offenders will have illegal slot exceptions generated for them. Associative writes for the memory-mapped cache array are still left enabled, until such a point that special cache operations for SH-4A are provided to move off of the current (and rather dated) SH-4 versions. Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/init.c | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c index ad85421099cd..d40b9db5be03 100644 --- a/arch/sh/kernel/cpu/init.c +++ b/arch/sh/kernel/cpu/init.c @@ -3,7 +3,7 @@ * * CPU init code * - * Copyright (C) 2002 - 2007 Paul Mundt + * Copyright (C) 2002 - 2009 Paul Mundt * Copyright (C) 2003 Richard Curnow * * This file is subject to the terms and conditions of the GNU General Public @@ -62,6 +62,37 @@ static void __init speculative_execution_init(void) #define speculative_execution_init() do { } while (0) #endif +#ifdef CONFIG_CPU_SH4A +#define EXPMASK 0xff2f0004 +#define EXPMASK_RTEDS (1 << 0) +#define EXPMASK_BRDSSLP (1 << 1) +#define EXPMASK_MMCAW (1 << 4) + +static void __init expmask_init(void) +{ + unsigned long expmask = __raw_readl(EXPMASK); + + /* + * Future proofing. + * + * Disable support for slottable sleep instruction + * and non-nop instructions in the rte delay slot. + */ + expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP); + + /* + * Enable associative writes to the memory-mapped cache array + * until the cache flush ops have been rewritten. 
+ */ + expmask |= EXPMASK_MMCAW; + + __raw_writel(expmask, EXPMASK); + ctrl_barrier(); +} +#else +#define expmask_init() do { } while (0) +#endif + /* 2nd-level cache init */ void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void) { @@ -321,4 +352,5 @@ asmlinkage void __init sh_cpu_init(void) #endif speculative_execution_init(); + expmask_init(); } -- cgit v1.2.3-59-g8ed1b From 180aa6e6aa11922dcd4c13df1967d62bb2ede76c Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sat, 15 Aug 2009 00:04:00 +0100 Subject: sh: Set the cfa_offset to 0 if we see a DW_CFA_def_cfa_register op The way that the CFA is calculated can change as we progress through a function. If we see a DW_CFA_def_cfa_register op we need to reset the frame's cfa_offset value which may have been previously setup. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/dwarf.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index db021361b161..c6c5764a8ab1 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -449,6 +449,7 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, count = dwarf_read_uleb128(current_insn, &frame->cfa_register); current_insn += count; + frame->cfa_offset = 0; frame->flags |= DWARF_FRAME_CFA_REG_OFFSET; break; case DW_CFA_def_cfa_offset: -- cgit v1.2.3-59-g8ed1b From dde5e3ffb770ef2854bbc32c51a365e932919e19 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 15 Aug 2009 09:49:32 +0900 Subject: sh: rework nommu for generic cache.c use. This does a bit of reorganizing for allowing nommu to use the new and generic cache.c, no functional changes. Signed-off-by: Paul Mundt --- arch/sh/include/asm/cacheflush.h | 2 ++ arch/sh/include/asm/page.h | 7 +------ arch/sh/include/cpu-sh3/cpu/cacheflush.h | 2 -- arch/sh/include/cpu-sh4/cpu/cacheflush.h | 2 -- arch/sh/kernel/cpu/init.c | 2 -- arch/sh/mm/mmap.c | 2 +- arch/sh/mm/tlb-nommu.c | 5 ----- 7 files changed, 4 insertions(+), 18 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h index 4bf621e4146d..9ec13fb909dd 100644 --- a/arch/sh/include/asm/cacheflush.h +++ b/arch/sh/include/asm/cacheflush.h @@ -76,5 +76,7 @@ void kmap_coherent_init(void); void *kmap_coherent(struct page *page, unsigned long addr); void kunmap_coherent(void); +#define PG_dcache_dirty PG_arch_1 + #endif /* __KERNEL__ */ #endif /* __ASM_SH_CACHEFLUSH_H */ diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index 847eeabb9083..a316eeb50b39 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -68,18 +68,13 @@ extern void clear_user_page(void *to, unsigned long address, struct page *page); extern void copy_user_page(void *to, void *from, unsigned long address, struct page *page); -#elif defined(CONFIG_MMU) +#else extern void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma); #define __HAVE_ARCH_COPY_USER_HIGHPAGE extern void clear_user_highpage(struct page *page, unsigned long vaddr); #define clear_user_highpage clear_user_highpage -#else - -#define clear_user_page(page, vaddr, pg) clear_page(page) -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) - #endif /* diff --git a/arch/sh/include/cpu-sh3/cpu/cacheflush.h b/arch/sh/include/cpu-sh3/cpu/cacheflush.h index 6485ad5649ad..3b5f3df4e1c8 100644 --- a/arch/sh/include/cpu-sh3/cpu/cacheflush.h +++ b/arch/sh/include/cpu-sh3/cpu/cacheflush.h 
@@ -15,8 +15,6 @@ * SH4. Unlike the SH4 this is a unified cache so we need to do some work * in mmap when 'exec'ing a new binary */ -#define PG_dcache_dirty PG_arch_1 - void flush_cache_all(void); void flush_cache_mm(struct mm_struct *mm); #define flush_cache_dup_mm(mm) flush_cache_mm(mm) diff --git a/arch/sh/include/cpu-sh4/cpu/cacheflush.h b/arch/sh/include/cpu-sh4/cpu/cacheflush.h index 3564f1722195..76764f0fb88a 100644 --- a/arch/sh/include/cpu-sh4/cpu/cacheflush.h +++ b/arch/sh/include/cpu-sh4/cpu/cacheflush.h @@ -38,6 +38,4 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, /* Initialization of P3 area for copy_user_page */ void p3_cache_init(void); -#define PG_dcache_dirty PG_arch_1 - #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */ diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c index ad85421099cd..c832fa4cf8ed 100644 --- a/arch/sh/kernel/cpu/init.c +++ b/arch/sh/kernel/cpu/init.c @@ -268,11 +268,9 @@ asmlinkage void __init sh_cpu_init(void) cache_init(); if (raw_smp_processor_id() == 0) { -#ifdef CONFIG_MMU shm_align_mask = max_t(unsigned long, current_cpu_data.dcache.way_size - 1, PAGE_SIZE - 1); -#endif /* Boot CPU sets the cache shape */ detect_cache_shape(); diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 1b5fdfb4e0c2..d2984fa42d3d 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -14,10 +14,10 @@ #include #include -#ifdef CONFIG_MMU unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ EXPORT_SYMBOL(shm_align_mask); +#ifdef CONFIG_MMU /* * To avoid cache aliases, we map the shared page with same color. */ diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c index c49e9d24c2e6..0710ebb99b9a 100644 --- a/arch/sh/mm/tlb-nommu.c +++ b/arch/sh/mm/tlb-nommu.c @@ -50,11 +50,6 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) { } -void __update_cache(struct vm_area_struct *vma, - unsigned long address, pte_t pte) -{ -} - void __init kmap_coherent_init(void) { } -- cgit v1.2.3-59-g8ed1b From e82da214d2fe3dc2610df966100c4f36bc0fad91 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 15 Aug 2009 10:48:13 +0900 Subject: sh: Track the CPU family in sh_cpuinfo. This adds a family member to struct sh_cpuinfo, which allows us to fall back more on the probe routines to work out what sort of subtype we are running on. This will be used by the CPU cache initialization code in order to first do family-level initialization, followed by subtype-level optimizations. Signed-off-by: Paul Mundt --- arch/sh/include/asm/bugs.h | 24 ++++++++++++------------ arch/sh/include/asm/processor.h | 13 ++++++++++++- arch/sh/kernel/cpu/sh2/probe.c | 1 + arch/sh/kernel/cpu/sh2a/probe.c | 2 ++ arch/sh/kernel/cpu/sh3/probe.c | 2 ++ arch/sh/kernel/cpu/sh4/probe.c | 9 +++++++-- arch/sh/kernel/cpu/sh5/probe.c | 2 ++ arch/sh/kernel/setup.c | 1 + 8 files changed, 39 insertions(+), 15 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h index 4924ff6f5439..46260fcbdf4b 100644 --- a/arch/sh/include/asm/bugs.h +++ b/arch/sh/include/asm/bugs.h @@ -21,25 +21,25 @@ static void __init check_bugs(void) current_cpu_data.loops_per_jiffy = loops_per_jiffy; - switch (current_cpu_data.type) { - case CPU_SH7619: + switch (current_cpu_data.family) { + case CPU_FAMILY_SH2: *p++ = '2'; break; - case CPU_SH7201 ... CPU_MXG: + case CPU_FAMILY_SH2A: *p++ = '2'; *p++ = 'a'; break; - case CPU_SH7705 ... CPU_SH7729: + case CPU_FAMILY_SH3: *p++ = '3'; break; - case CPU_SH7750 ... 
CPU_SH4_501: + case CPU_FAMILY_SH4: *p++ = '4'; break; - case CPU_SH7763 ... CPU_SHX3: + case CPU_FAMILY_SH4A: *p++ = '4'; *p++ = 'a'; break; - case CPU_SH7343 ... CPU_SH7366: + case CPU_FAMILY_SH4AL_DSP: *p++ = '4'; *p++ = 'a'; *p++ = 'l'; @@ -48,15 +48,15 @@ static void __init check_bugs(void) *p++ = 's'; *p++ = 'p'; break; - case CPU_SH5_101 ... CPU_SH5_103: + case CPU_FAMILY_SH5: *p++ = '6'; *p++ = '4'; break; - case CPU_SH_NONE: + case CPU_FAMILY_UNKNOWN: /* - * Specifically use CPU_SH_NONE rather than default:, - * so we're able to have the compiler whine about - * unhandled enumerations. + * Specifically use CPU_FAMILY_UNKNOWN rather than + * default:, so we're able to have the compiler whine + * about unhandled enumerations. */ break; } diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index ff7daaf9a620..db1a4f3a755f 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h @@ -44,6 +44,17 @@ enum cpu_type { CPU_SH_NONE }; +enum cpu_family { + CPU_FAMILY_SH2, + CPU_FAMILY_SH2A, + CPU_FAMILY_SH3, + CPU_FAMILY_SH4, + CPU_FAMILY_SH4A, + CPU_FAMILY_SH4AL_DSP, + CPU_FAMILY_SH5, + CPU_FAMILY_UNKNOWN, +}; + /* * TLB information structure * @@ -61,7 +72,7 @@ struct tlb_info { }; struct sh_cpuinfo { - unsigned int type; + unsigned int type, family; int cut_major, cut_minor; unsigned long loops_per_jiffy; unsigned long asid_cache; diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c index 5916d9096b99..1db6d8883888 100644 --- a/arch/sh/kernel/cpu/sh2/probe.c +++ b/arch/sh/kernel/cpu/sh2/probe.c @@ -29,6 +29,7 @@ int __init detect_cpu_and_cache_system(void) */ boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED; boot_cpu_data.icache = boot_cpu_data.dcache; + boot_cpu_data.family = CPU_FAMILY_SH2; return 0; } diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c index e098e2f6aa08..6825d6507164 100644 --- a/arch/sh/kernel/cpu/sh2a/probe.c +++ b/arch/sh/kernel/cpu/sh2a/probe.c @@ -15,6 +15,8 @@ int __init detect_cpu_and_cache_system(void) { + boot_cpu_data.family = CPU_FAMILY_SH2A; + /* All SH-2A CPUs have support for 16 and 32-bit opcodes.. */ boot_cpu_data.flags |= CPU_HAS_OP32; diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c index 10f2a760c5ee..f9c7df64eb01 100644 --- a/arch/sh/kernel/cpu/sh3/probe.c +++ b/arch/sh/kernel/cpu/sh3/probe.c @@ -107,5 +107,7 @@ int __uses_jump_to_uncached detect_cpu_and_cache_system(void) boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED; boot_cpu_data.icache = boot_cpu_data.dcache; + boot_cpu_data.family = CPU_FAMILY_SH3; + return 0; } diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c index 6c78d0a9c857..1148607c36bf 100644 --- a/arch/sh/kernel/cpu/sh4/probe.c +++ b/arch/sh/kernel/cpu/sh4/probe.c @@ -57,8 +57,12 @@ int __init detect_cpu_and_cache_system(void) * Setup some generic flags we can probe on SH-4A parts */ if (((pvr >> 16) & 0xff) == 0x10) { - if ((cvr & 0x10000000) == 0) + boot_cpu_data.family = CPU_FAMILY_SH4A; + + if ((cvr & 0x10000000) == 0) { boot_cpu_data.flags |= CPU_HAS_DSP; + boot_cpu_data.family = CPU_FAMILY_SH4AL_DSP; + } boot_cpu_data.flags |= CPU_HAS_LLSC | CPU_HAS_PERF_COUNTER; boot_cpu_data.cut_major = pvr & 0x7f; @@ -68,6 +72,7 @@ int __init detect_cpu_and_cache_system(void) } else { /* And some SH-4 defaults.. 
*/ boot_cpu_data.flags |= CPU_HAS_PTEA; + boot_cpu_data.family = CPU_FAMILY_SH4; } /* FPU detection works for everyone */ @@ -174,7 +179,7 @@ int __init detect_cpu_and_cache_system(void) break; default: - boot_cpu_data.type = CPU_SH_NONE; + boot_cpu_data.type = boot_cpu_data.family = CPU_SH_NONE; break; } diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c index 92ad844b5c12..521d05b3f7ba 100644 --- a/arch/sh/kernel/cpu/sh5/probe.c +++ b/arch/sh/kernel/cpu/sh5/probe.c @@ -34,6 +34,8 @@ int __init detect_cpu_and_cache_system(void) /* CPU.VCR aliased at CIR address on SH5-101 */ boot_cpu_data.type = CPU_SH5_101; + boot_cpu_data.family = CPU_FAMILY_SH5; + /* * First, setup some sane values for the I-cache. */ diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index ceb409bf7741..dc403e42bcab 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -49,6 +49,7 @@ struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = { [0] = { .type = CPU_SH_NONE, + .family = CPU_FAMILY_UNKNOWN, .loops_per_jiffy = 10000000, }, }; -- cgit v1.2.3-59-g8ed1b From eccee7457df2b1e1505522ab0064e577e16eed75 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 15 Aug 2009 13:15:02 +0900 Subject: sh: Kill off the unhandled pvr case in SH-4 CPU probing. This is superfluous, as the default CPU type and family are already established by the initial cpuinfo definition. Given that we are still able to probe for the CPU family even if we are not able to detect the subtype, it's preferable to let the probing code fill out what it can and leave the rest. Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4/probe.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c index 1148607c36bf..695026a3f02d 100644 --- a/arch/sh/kernel/cpu/sh4/probe.c +++ b/arch/sh/kernel/cpu/sh4/probe.c @@ -177,9 +177,6 @@ int __init detect_cpu_and_cache_system(void) boot_cpu_data.icache.ways = 2; boot_cpu_data.dcache.ways = 2; - break; - default: - boot_cpu_data.type = boot_cpu_data.family = CPU_SH_NONE; break; } -- cgit v1.2.3-59-g8ed1b From d4f587c67fc39e0030ddd718675e252e208da4d7 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 14 Aug 2009 15:47:31 +0200 Subject: timekeeping: Increase granularity of read_persistent_clock() The persistent clock of some architectures (e.g. s390) have a better granularity than seconds. To reduce the delta between the host clock and the guest clock in a virtualized system change the read_persistent_clock function to return a struct timespec. 
Signed-off-by: Martin Schwidefsky Cc: Ingo Molnar Acked-by: John Stultz Cc: Daniel Walker LKML-Reference: <20090814134811.013873340@de.ibm.com> Signed-off-by: Thomas Gleixner --- arch/m68knommu/kernel/time.c | 5 ++-- arch/mips/dec/time.c | 5 ++-- arch/mips/lasat/ds1603.c | 5 ++-- arch/mips/lasat/sysctl.c | 8 ++++-- arch/mips/lemote/lm2e/setup.c | 5 ++-- arch/mips/mti-malta/malta-time.c | 5 ++-- arch/mips/pmc-sierra/yosemite/setup.c | 5 ++-- arch/mips/sibyte/swarm/setup.c | 15 +++++++--- arch/mips/sni/time.c | 5 ++-- arch/powerpc/kernel/time.c | 7 +++-- arch/s390/kernel/time.c | 22 +++------------ arch/sh/kernel/time.c | 6 ++-- arch/x86/kernel/rtc.c | 5 ++-- arch/xtensa/kernel/time.c | 5 ++-- include/linux/time.h | 2 +- kernel/time/timekeeping.c | 52 +++++++++++++++++++---------------- 16 files changed, 83 insertions(+), 74 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c index d182b2f72211..68432248515c 100644 --- a/arch/m68knommu/kernel/time.c +++ b/arch/m68knommu/kernel/time.c @@ -72,9 +72,10 @@ static unsigned long read_rtc_mmss(void) return mktime(year, mon, day, hour, min, sec);; } -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { - return read_rtc_mmss(); + ts->tv_sec = read_rtc_mmss(); + ts->tv_nsec = 0; } int update_persistent_clock(struct timespec now) diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c index 463136e6685a..02f505f23c32 100644 --- a/arch/mips/dec/time.c +++ b/arch/mips/dec/time.c @@ -18,7 +18,7 @@ #include #include -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { unsigned int year, mon, day, hour, min, sec, real_year; unsigned long flags; @@ -53,7 +53,8 @@ unsigned long read_persistent_clock(void) year += real_year - 72 + 2000; - return mktime(year, mon, day, hour, min, sec); + ts->tv_sec = mktime(year, mon, day, hour, min, sec); + ts->tv_nsec = 0; } /* diff --git a/arch/mips/lasat/ds1603.c b/arch/mips/lasat/ds1603.c index 52cb1436a12a..c6fd96ff118d 100644 --- a/arch/mips/lasat/ds1603.c +++ b/arch/mips/lasat/ds1603.c @@ -135,7 +135,7 @@ static void rtc_end_op(void) lasat_ndelay(1000); } -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { unsigned long word; unsigned long flags; @@ -147,7 +147,8 @@ unsigned long read_persistent_clock(void) rtc_end_op(); spin_unlock_irqrestore(&rtc_lock, flags); - return word; + ts->tv_sec = word; + ts->tv_nsec = 0; } int rtc_mips_set_mmss(unsigned long time) diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c index 8f88886feb12..3f04d4c406b7 100644 --- a/arch/mips/lasat/sysctl.c +++ b/arch/mips/lasat/sysctl.c @@ -92,10 +92,12 @@ static int rtctmp; int proc_dolasatrtc(ctl_table *table, int write, struct file *filp, void *buffer, size_t *lenp, loff_t *ppos) { + struct timespec ts; int r; if (!write) { - rtctmp = read_persistent_clock(); + read_persistent_clock(&ts); + rtctmp = ts.tv_sec; /* check for time < 0 and set to 0 */ if (rtctmp < 0) rtctmp = 0; @@ -134,9 +136,11 @@ int sysctl_lasat_rtc(ctl_table *table, void *oldval, size_t *oldlenp, void *newval, size_t newlen) { + struct timespec ts; int r; - rtctmp = read_persistent_clock(); + read_persistent_clock(&ts); + rtctmp = ts.tv_sec; if (rtctmp < 0) rtctmp = 0; r = sysctl_intvec(table, oldval, oldlenp, newval, newlen); diff --git a/arch/mips/lemote/lm2e/setup.c b/arch/mips/lemote/lm2e/setup.c index ebd6ceaef2fd..24b355df6127 100644 --- a/arch/mips/lemote/lm2e/setup.c 
+++ b/arch/mips/lemote/lm2e/setup.c @@ -54,9 +54,10 @@ void __init plat_time_init(void) mips_hpt_frequency = cpu_clock_freq / 2; } -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { - return mc146818_get_cmos_time(); + ts->tv_sec = mc146818_get_cmos_time(); + ts->tv_nsec = 0; } void (*__wbflush)(void); diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 0b97d47691fc..3c6f190aa61c 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -100,9 +100,10 @@ static unsigned int __init estimate_cpu_frequency(void) return count; } -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { - return mc146818_get_cmos_time(); + ts->tv_sec = mc146818_get_cmos_time(); + ts->tv_nsec = 0; } static void __init plat_perf_setup(void) diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c index 2d3c0dca275d..3498ac9c35af 100644 --- a/arch/mips/pmc-sierra/yosemite/setup.c +++ b/arch/mips/pmc-sierra/yosemite/setup.c @@ -70,7 +70,7 @@ void __init bus_error_init(void) } -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { unsigned int year, month, day, hour, min, sec; unsigned long flags; @@ -92,7 +92,8 @@ unsigned long read_persistent_clock(void) m48t37_base->control = 0x00; spin_unlock_irqrestore(&rtc_lock, flags); - return mktime(year, month, day, hour, min, sec); + ts->tv_sec = mktime(year, month, day, hour, min, sec); + ts->tv_nsec = 0; } int rtc_mips_set_time(unsigned long tim) diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c index 672e45d495a9..623ffc933c4c 100644 --- a/arch/mips/sibyte/swarm/setup.c +++ b/arch/mips/sibyte/swarm/setup.c @@ -87,19 +87,26 @@ enum swarm_rtc_type { enum swarm_rtc_type swarm_rtc_type; -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { + unsigned long sec; + switch (swarm_rtc_type) { case RTC_XICOR: - return xicor_get_time(); + sec = xicor_get_time(); + break; case RTC_M4LT81: - return m41t81_get_time(); + sec = m41t81_get_time(); + break; case RTC_NONE: default: - return mktime(2000, 1, 1, 0, 0, 0); + sec = mktime(2000, 1, 1, 0, 0, 0); + break; } + ts->tv_sec = sec; + tv->tv_nsec = 0; } int rtc_mips_set_time(unsigned long sec) diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c index 0d9ec1a5c24a..62df6a598e0a 100644 --- a/arch/mips/sni/time.c +++ b/arch/mips/sni/time.c @@ -182,7 +182,8 @@ void __init plat_time_init(void) setup_pit_timer(); } -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { - return -1; + ts->tv_sec = -1; + ts->tv_nsec = 0; } diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index eae4511ceeac..ad63f30fe3da 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -769,7 +769,7 @@ int update_persistent_clock(struct timespec now) return ppc_md.set_rtc_time(&tm); } -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { struct rtc_time tm; static int first = 1; @@ -787,8 +787,9 @@ unsigned long read_persistent_clock(void) if (!ppc_md.get_rtc_time) return 0; ppc_md.get_rtc_time(&tm); - return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, - tm.tm_hour, tm.tm_min, tm.tm_sec); + ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec); + ts->tv_nsec = 0; } /* clocksource code */ diff --git 
a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index e76c2e7a8b9a..a94ec48587b4 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -182,12 +182,9 @@ static void timing_alert_interrupt(__u16 code) static void etr_reset(void); static void stp_reset(void); -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { - struct timespec ts; - - tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, &ts); - return ts.tv_sec; + tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts); } static cycle_t read_tod_clock(struct clocksource *cs) @@ -248,7 +245,6 @@ void __init time_init(void) { struct timespec ts; unsigned long flags; - cycle_t now; /* Reset time synchronization interfaces. */ etr_reset(); @@ -266,20 +262,10 @@ void __init time_init(void) panic("Could not register TOD clock source"); /* - * The TOD clock is an accurate clock. The xtime should be - * initialized in a way that the difference between TOD and - * xtime is reasonably small. Too bad that timekeeping_init - * sets xtime.tv_nsec to zero. In addition the clock source - * change from the jiffies clock source to the TOD clock - * source add another error of up to 1/HZ second. The same - * function sets wall_to_monotonic to a value that is too - * small for /proc/uptime to be accurate. - * Reset xtime and wall_to_monotonic to sane values. + * Reset wall_to_monotonic to the initial timestamp created + * in head.S to get a precise value in /proc/uptime. */ write_seqlock_irqsave(&xtime_lock, flags); - now = get_clock(); - tod_to_timeval(now - TOD_UNIX_EPOCH, &xtime); - clocksource_tod.cycle_last = now; tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, &ts); set_normalized_timespec(&wall_to_monotonic, -ts.tv_sec, -ts.tv_nsec); write_sequnlock_irqrestore(&xtime_lock, flags); diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c index 9b352a1e3fb4..3f4706aa975e 100644 --- a/arch/sh/kernel/time.c +++ b/arch/sh/kernel/time.c @@ -39,11 +39,9 @@ void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time; int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time; #ifdef CONFIG_GENERIC_CMOS_UPDATE -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { - struct timespec tv; - rtc_sh_get_time(&tv); - return tv.tv_sec; + rtc_sh_get_time(&ts); } int update_persistent_clock(struct timespec now) diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 5d465b207e72..bf67dcb4a44c 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c @@ -178,7 +178,7 @@ static int set_rtc_mmss(unsigned long nowtime) } /* not static: needed by APM */ -unsigned long read_persistent_clock(void) +void read_persistent_clock(struct timespec *ts) { unsigned long retval, flags; @@ -186,7 +186,8 @@ unsigned long read_persistent_clock(void) retval = get_wallclock(); spin_unlock_irqrestore(&rtc_lock, flags); - return retval; + ts->tv_sec = retval; + ts->tv_nsec = 0; } int update_persistent_clock(struct timespec now) diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index 8848120d291b..19085ff0484a 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c @@ -59,9 +59,8 @@ static struct irqaction timer_irqaction = { void __init time_init(void) { - xtime.tv_nsec = 0; - xtime.tv_sec = read_persistent_clock(); - + /* FIXME: xtime&wall_to_monotonic are set in timekeeping_init. 
*/ + read_persistent_clock(&xtime); set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); diff --git a/include/linux/time.h b/include/linux/time.h index e7c844558884..53a3216f0d1b 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -101,7 +101,7 @@ extern struct timespec xtime; extern struct timespec wall_to_monotonic; extern seqlock_t xtime_lock; -extern unsigned long read_persistent_clock(void); +extern void read_persistent_clock(struct timespec *ts); extern int update_persistent_clock(struct timespec now); extern int no_sync_cmos_clock __read_mostly; void timekeeping_init(void); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 41579e7fcf9d..f1a21ce491e6 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -154,7 +154,7 @@ __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock); */ struct timespec xtime __attribute__ ((aligned (16))); struct timespec wall_to_monotonic __attribute__ ((aligned (16))); -static unsigned long total_sleep_time; /* seconds */ +static struct timespec total_sleep_time; /* * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. @@ -487,17 +487,18 @@ int timekeeping_valid_for_hres(void) } /** - * read_persistent_clock - Return time in seconds from the persistent clock. + * read_persistent_clock - Return time from the persistent clock. * * Weak dummy function for arches that do not yet support it. - * Returns seconds from epoch using the battery backed persistent clock. - * Returns zero if unsupported. + * Reads the time from the battery backed persistent clock. + * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported. * * XXX - Do be sure to remove it once all arches implement it. */ -unsigned long __attribute__((weak)) read_persistent_clock(void) +void __attribute__((weak)) read_persistent_clock(struct timespec *ts) { - return 0; + ts->tv_sec = 0; + ts->tv_nsec = 0; } /* @@ -507,7 +508,9 @@ void __init timekeeping_init(void) { struct clocksource *clock; unsigned long flags; - unsigned long sec = read_persistent_clock(); + struct timespec now; + + read_persistent_clock(&now); write_seqlock_irqsave(&xtime_lock, flags); @@ -518,19 +521,20 @@ void __init timekeeping_init(void) clock->enable(clock); timekeeper_setup_internals(clock); - xtime.tv_sec = sec; - xtime.tv_nsec = 0; + xtime.tv_sec = now.tv_sec; + xtime.tv_nsec = now.tv_nsec; raw_time.tv_sec = 0; raw_time.tv_nsec = 0; set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); update_xtime_cache(0); - total_sleep_time = 0; + total_sleep_time.tv_sec = 0; + total_sleep_time.tv_nsec = 0; write_sequnlock_irqrestore(&xtime_lock, flags); } /* time in seconds when suspend began */ -static unsigned long timekeeping_suspend_time; +static struct timespec timekeeping_suspend_time; /** * timekeeping_resume - Resumes the generic timekeeping subsystem. 
@@ -543,18 +547,19 @@ static unsigned long timekeeping_suspend_time; static int timekeeping_resume(struct sys_device *dev) { unsigned long flags; - unsigned long now = read_persistent_clock(); + struct timespec ts; + + read_persistent_clock(&ts); clocksource_resume(); write_seqlock_irqsave(&xtime_lock, flags); - if (now && (now > timekeeping_suspend_time)) { - unsigned long sleep_length = now - timekeeping_suspend_time; - - xtime.tv_sec += sleep_length; - wall_to_monotonic.tv_sec -= sleep_length; - total_sleep_time += sleep_length; + if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { + ts = timespec_sub(ts, timekeeping_suspend_time); + xtime = timespec_add_safe(xtime, ts); + wall_to_monotonic = timespec_sub(wall_to_monotonic, ts); + total_sleep_time = timespec_add_safe(total_sleep_time, ts); } update_xtime_cache(0); /* re-base the last cycle value */ @@ -577,7 +582,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state) { unsigned long flags; - timekeeping_suspend_time = read_persistent_clock(); + read_persistent_clock(&timekeeping_suspend_time); write_seqlock_irqsave(&xtime_lock, flags); timekeeping_forward_now(); @@ -801,9 +806,10 @@ void update_wall_time(void) */ void getboottime(struct timespec *ts) { - set_normalized_timespec(ts, - - (wall_to_monotonic.tv_sec + total_sleep_time), - - wall_to_monotonic.tv_nsec); + struct timespec boottime; + + boottime = timespec_add_safe(wall_to_monotonic, total_sleep_time); + set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); } /** @@ -812,7 +818,7 @@ void getboottime(struct timespec *ts) */ void monotonic_to_bootbased(struct timespec *ts) { - ts->tv_sec += total_sleep_time; + *ts = timespec_add_safe(*ts, total_sleep_time); } unsigned long get_seconds(void) -- cgit v1.2.3-59-g8ed1b From 1ee4ab09f38b77b3a5750429d456d6606b237924 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 16 Aug 2009 00:55:38 +0900 Subject: sh64: Build in support for generic stacktrace ops. Signed-off-by: Paul Mundt --- arch/sh/kernel/Makefile_64 | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64 index 639ee514266c..cdfec1e499a0 100644 --- a/arch/sh/kernel/Makefile_64 +++ b/arch/sh/kernel/Makefile_64 @@ -1,8 +1,9 @@ extra-y := head_64.o init_task.o vmlinux.lds -obj-y := debugtraps.o idle.o io.o io_generic.o irq.o machvec.o process_64.o \ - ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \ - syscalls_64.o time.o topology.o traps.o traps_64.o unwinder.o +obj-y := debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o \ + machvec.o process_64.o ptrace_64.o setup.o signal_64.o \ + sys_sh.o sys_sh64.o syscalls_64.o time.o topology.o \ + traps.o traps_64.o unwinder.o obj-y += cpu/ obj-$(CONFIG_SMP) += smp.o -- cgit v1.2.3-59-g8ed1b From 94ecd224c940830e2f2724c3860eb7fb74c15d31 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 16 Aug 2009 01:50:17 +0900 Subject: sh: Fix up the SH-5 build with caches enabled. 
Signed-off-by: Paul Mundt --- arch/sh/include/asm/system.h | 14 +-- arch/sh/include/asm/system_32.h | 10 ++ arch/sh/include/asm/system_64.h | 5 + arch/sh/kernel/sh_ksyms_64.c | 8 -- arch/sh/mm/cache-sh5.c | 249 ++++------------------------------------ arch/sh/mm/flush-sh4.c | 81 +++++-------- 6 files changed, 64 insertions(+), 303 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h index ab79e1f4fbe0..bf7c4cbde372 100644 --- a/arch/sh/include/asm/system.h +++ b/arch/sh/include/asm/system.h @@ -14,18 +14,6 @@ #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */ -#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5) -#define __icbi() \ -{ \ - unsigned long __addr; \ - __addr = 0xa8000000; \ - __asm__ __volatile__( \ - "icbi %0\n\t" \ - : /* no output */ \ - : "m" (__m(__addr))); \ -} -#endif - /* * A brief note on ctrl_barrier(), the control register write barrier. * @@ -44,7 +32,7 @@ #define mb() __asm__ __volatile__ ("synco": : :"memory") #define rmb() mb() #define wmb() __asm__ __volatile__ ("synco": : :"memory") -#define ctrl_barrier() __icbi() +#define ctrl_barrier() __icbi(0xa8000000) #define read_barrier_depends() do { } while(0) #else #define mb() __asm__ __volatile__ ("": : :"memory") diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h index d7299d69ff79..5ddd2359f3ef 100644 --- a/arch/sh/include/asm/system_32.h +++ b/arch/sh/include/asm/system_32.h @@ -63,6 +63,16 @@ do { \ #define __restore_dsp(tsk) do { } while (0) #endif +#if defined(CONFIG_CPU_SH4A) +#define __icbi(addr) __asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr)) +#else +#define __icbi(addr) mb() +#endif + +#define __ocbp(addr) __asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr)) +#define __ocbi(addr) __asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr)) +#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr)) + struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next); diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h index 218b54d9d660..8e4a03e7966c 100644 --- a/arch/sh/include/asm/system_64.h +++ b/arch/sh/include/asm/system_64.h @@ -37,6 +37,11 @@ do { \ #define jump_to_uncached() do { } while (0) #define back_to_cached() do { } while (0) +#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr)) +#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr)) +#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr)) +#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr)) + static inline reg_size_t register_align(void *val) { return (unsigned long long)(signed long long)(signed long)val; diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c index f96c95c07c4b..d008e17eb257 100644 --- a/arch/sh/kernel/sh_ksyms_64.c +++ b/arch/sh/kernel/sh_ksyms_64.c @@ -30,14 +30,6 @@ extern int dump_fpu(struct pt_regs *, elf_fpregset_t *); EXPORT_SYMBOL(dump_fpu); EXPORT_SYMBOL(kernel_thread); -#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) -EXPORT_SYMBOL(clear_user_page); -#endif - -#ifndef CONFIG_CACHE_OFF -EXPORT_SYMBOL(flush_dcache_page); -#endif - #ifdef CONFIG_VT EXPORT_SYMBOL(screen_info); #endif diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c index a8f5142dc2cf..819e0f9e8dba 100644 --- a/arch/sh/mm/cache-sh5.c +++ b/arch/sh/mm/cache-sh5.c @@ -25,29 +25,6 @@ extern void __weak sh4__flush_region_init(void); /* Wired TLB entry for the 
D-cache */ static unsigned long long dtlb_cache_slot; -void __init cpu_cache_init(void) -{ - /* Reserve a slot for dcache colouring in the DTLB */ - dtlb_cache_slot = sh64_get_wired_dtlb_entry(); - - sh4__flush_region_init(); -} - -void __init kmap_coherent_init(void) -{ - /* XXX ... */ -} - -void *kmap_coherent(struct page *page, unsigned long addr) -{ - /* XXX ... */ - return NULL; -} - -void kunmap_coherent(void) -{ -} - #ifdef CONFIG_DCACHE_DISABLED #define sh64_dcache_purge_all() do { } while (0) #define sh64_dcache_purge_coloured_phy_page(paddr, eaddr) do { } while (0) @@ -233,52 +210,6 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm, } } -/* - * Invalidate a small range of user context I-cache, not necessarily page - * (or even cache-line) aligned. - * - * Since this is used inside ptrace, the ASID in the mm context typically - * won't match current_asid. We'll have to switch ASID to do this. For - * safety, and given that the range will be small, do all this under cli. - * - * Note, there is a hazard that the ASID in mm->context is no longer - * actually associated with mm, i.e. if the mm->context has started a new - * cycle since mm was last active. However, this is just a performance - * issue: all that happens is that we invalidate lines belonging to - * another mm, so the owning process has to refill them when that mm goes - * live again. mm itself can't have any cache entries because there will - * have been a flush_cache_all when the new mm->context cycle started. - */ -static void sh64_icache_inv_user_small_range(struct mm_struct *mm, - unsigned long start, int len) -{ - unsigned long long eaddr = start; - unsigned long long eaddr_end = start + len; - unsigned long current_asid, mm_asid; - unsigned long flags; - unsigned long long epage_start; - - /* - * Align to start of cache line. Otherwise, suppose len==8 and - * start was at 32N+28 : the last 4 bytes wouldn't get invalidated. - */ - eaddr = L1_CACHE_ALIGN(start); - eaddr_end = start + len; - - mm_asid = cpu_asid(smp_processor_id(), mm); - local_irq_save(flags); - current_asid = switch_and_save_asid(mm_asid); - - epage_start = eaddr & PAGE_MASK; - - while (eaddr < eaddr_end) { - __asm__ __volatile__("icbi %0, 0" : : "r" (eaddr)); - eaddr += L1_CACHE_BYTES; - } - switch_and_save_asid(current_asid); - local_irq_restore(flags); -} - static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end) { /* The icbi instruction never raises ITLBMISS. i.e. if there's not a @@ -564,7 +495,7 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm, * Invalidate the entire contents of both caches, after writing back to * memory any dirty data from the D-cache. */ -void flush_cache_all(void) +static void sh5_flush_cache_all(void) { sh64_dcache_purge_all(); sh64_icache_inv_all(); @@ -591,7 +522,7 @@ void flush_cache_all(void) * I-cache. This is similar to the lack of action needed in * flush_tlb_mm - see fault.c. */ -void flush_cache_mm(struct mm_struct *mm) +static void sh5_flush_cache_mm(struct mm_struct *mm) { sh64_dcache_purge_all(); } @@ -603,8 +534,8 @@ void flush_cache_mm(struct mm_struct *mm) * * Note, 'end' is 1 byte beyond the end of the range to flush. 
*/ -void flush_cache_range(struct vm_area_struct *vma, unsigned long start, - unsigned long end) +static void sh5_flush_cache_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; @@ -621,8 +552,8 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, * * Note, this is called with pte lock held. */ -void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, - unsigned long pfn) +static void sh5_flush_cache_page(struct vm_area_struct *vma, + unsigned long eaddr, unsigned long pfn) { sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT); @@ -630,7 +561,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, sh64_icache_inv_user_page(vma, eaddr); } -void flush_dcache_page(struct page *page) +static void sh5_flush_dcache_page(struct page *page) { sh64_dcache_purge_phy_page(page_to_phys(page)); wmb(); @@ -644,39 +575,20 @@ void flush_dcache_page(struct page *page) * mapping, therefore it's guaranteed that there no cache entries for * the range in cache sets of the wrong colour. */ -void flush_icache_range(unsigned long start, unsigned long end) +static void sh5_flush_icache_range(unsigned long start, unsigned long end) { __flush_purge_region((void *)start, end); wmb(); sh64_icache_inv_kernel_range(start, end); } -/* - * Flush the range of user (defined by vma->vm_mm) address space starting - * at 'addr' for 'len' bytes from the cache. The range does not straddle - * a page boundary, the unique physical page containing the range is - * 'page'. This seems to be used mainly for invalidating an address - * range following a poke into the program text through the ptrace() call - * from another process (e.g. for BRK instruction insertion). - */ -static void flush_icache_user_range(struct vm_area_struct *vma, - struct page *page, unsigned long addr, int len) -{ - - sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr); - mb(); - - if (vma->vm_flags & VM_EXEC) - sh64_icache_inv_user_small_range(vma->vm_mm, addr, len); -} - /* * For the address range [start,end), write back the data from the * D-cache and invalidate the corresponding region of the I-cache for the * current process. Used to flush signal trampolines on the stack to * make them executable. */ -void flush_cache_sigtramp(unsigned long vaddr) +static void sh5_flush_cache_sigtramp(unsigned long vaddr) { unsigned long end = vaddr + L1_CACHE_BYTES; @@ -685,138 +597,19 @@ void flush_cache_sigtramp(unsigned long vaddr) sh64_icache_inv_current_user_range(vaddr, end); } -#ifdef CONFIG_MMU -/* - * These *MUST* lie in an area of virtual address space that's otherwise - * unused. - */ -#define UNIQUE_EADDR_START 0xe0000000UL -#define UNIQUE_EADDR_END 0xe8000000UL - -/* - * Given a physical address paddr, and a user virtual address user_eaddr - * which will eventually be mapped to it, create a one-off kernel-private - * eaddr mapped to the same paddr. This is used for creating special - * destination pages for copy_user_page and clear_user_page. 
- */ -static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr, - unsigned long paddr) -{ - static unsigned long current_pointer = UNIQUE_EADDR_START; - unsigned long coloured_pointer; - - if (current_pointer == UNIQUE_EADDR_END) { - sh64_dcache_purge_all(); - current_pointer = UNIQUE_EADDR_START; - } - - coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) | - (user_eaddr & CACHE_OC_SYN_MASK); - sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr); - - current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS); - - return coloured_pointer; -} - -static void sh64_copy_user_page_coloured(void *to, void *from, - unsigned long address) -{ - void *coloured_to; - - /* - * Discard any existing cache entries of the wrong colour. These are - * present quite often, if the kernel has recently used the page - * internally, then given it up, then it's been allocated to the user. - */ - sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to); - - coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to)); - copy_page(from, coloured_to); - - sh64_teardown_dtlb_cache_slot(); -} - -static void sh64_clear_user_page_coloured(void *to, unsigned long address) -{ - void *coloured_to; - - /* - * Discard any existing kernel-originated lines of the wrong - * colour (as above) - */ - sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to); - - coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to)); - clear_page(coloured_to); - - sh64_teardown_dtlb_cache_slot(); -} - -/* - * 'from' and 'to' are kernel virtual addresses (within the superpage - * mapping of the physical RAM). 'address' is the user virtual address - * where the copy 'to' will be mapped after. This allows a custom - * mapping to be used to ensure that the new copy is placed in the - * right cache sets for the user to see it without having to bounce it - * out via memory. Note however : the call to flush_page_to_ram in - * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one - * very important case! - * - * TBD : can we guarantee that on every call, any cache entries for - * 'from' are in the same colour sets as 'address' also? i.e. is this - * always used just to deal with COW? (I suspect not). - * - * There are two possibilities here for when the page 'from' was last accessed: - * - by the kernel : this is OK, no purge required. - * - by the/a user (e.g. for break_COW) : need to purge. - * - * If the potential user mapping at 'address' is the same colour as - * 'from' there is no need to purge any cache lines from the 'from' - * page mapped into cache sets of colour 'address'. (The copy will be - * accessing the page through 'from'). - */ -void copy_user_page(void *to, void *from, unsigned long address, - struct page *page) +void __init sh5_cache_init(void) { - if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0) - sh64_dcache_purge_coloured_phy_page(__pa(from), address); + flush_cache_all = sh5_flush_cache_all; + flush_cache_mm = sh5_flush_cache_mm; + flush_cache_dup_mm = sh5_flush_cache_mm; + flush_cache_page = sh5_flush_cache_page; + flush_cache_range = sh5_flush_cache_range; + flush_dcache_page = sh5_flush_dcache_page; + flush_icache_range = sh5_flush_icache_range; + flush_cache_sigtramp = sh5_flush_cache_sigtramp; - if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) - copy_page(to, from); - else - sh64_copy_user_page_coloured(to, from, address); -} - -/* - * 'to' is a kernel virtual address (within the superpage mapping of the - * physical RAM). 
'address' is the user virtual address where the 'to' - * page will be mapped after. This allows a custom mapping to be used to - * ensure that the new copy is placed in the right cache sets for the - * user to see it without having to bounce it out via memory. - */ -void clear_user_page(void *to, unsigned long address, struct page *page) -{ - if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) - clear_page(to); - else - sh64_clear_user_page_coloured(to, address); -} - -void copy_to_user_page(struct vm_area_struct *vma, struct page *page, - unsigned long vaddr, void *dst, const void *src, - unsigned long len) -{ - flush_cache_page(vma, vaddr, page_to_pfn(page)); - memcpy(dst, src, len); - flush_icache_user_range(vma, page, vaddr, len); -} + /* Reserve a slot for dcache colouring in the DTLB */ + dtlb_cache_slot = sh64_get_wired_dtlb_entry(); -void copy_from_user_page(struct vm_area_struct *vma, struct page *page, - unsigned long vaddr, void *dst, const void *src, - unsigned long len) -{ - flush_cache_page(vma, vaddr, page_to_pfn(page)); - memcpy(dst, src, len); + sh4__flush_region_init(); } -#endif diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c index 99c50dc7551e..cef402678f42 100644 --- a/arch/sh/mm/flush-sh4.c +++ b/arch/sh/mm/flush-sh4.c @@ -19,28 +19,19 @@ static void sh4__flush_wback_region(void *start, int size) cnt = (end - v) / L1_CACHE_BYTES; while (cnt >= 8) { - asm volatile("ocbwb @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbwb @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbwb @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbwb @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbwb @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbwb @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbwb @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbwb @%0" : : "r" (v)); - v += L1_CACHE_BYTES; + __ocbwb(v); v += L1_CACHE_BYTES; + __ocbwb(v); v += L1_CACHE_BYTES; + __ocbwb(v); v += L1_CACHE_BYTES; + __ocbwb(v); v += L1_CACHE_BYTES; + __ocbwb(v); v += L1_CACHE_BYTES; + __ocbwb(v); v += L1_CACHE_BYTES; + __ocbwb(v); v += L1_CACHE_BYTES; + __ocbwb(v); v += L1_CACHE_BYTES; cnt -= 8; } while (cnt) { - asm volatile("ocbwb @%0" : : "r" (v)); - v += L1_CACHE_BYTES; + __ocbwb(v); v += L1_CACHE_BYTES; cnt--; } } @@ -62,27 +53,18 @@ static void sh4__flush_purge_region(void *start, int size) cnt = (end - v) / L1_CACHE_BYTES; while (cnt >= 8) { - asm volatile("ocbp @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbp @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbp @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbp @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbp @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbp @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbp @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbp @%0" : : "r" (v)); - v += L1_CACHE_BYTES; + __ocbp(v); v += L1_CACHE_BYTES; + __ocbp(v); v += L1_CACHE_BYTES; + __ocbp(v); v += L1_CACHE_BYTES; + __ocbp(v); v += L1_CACHE_BYTES; + __ocbp(v); v += L1_CACHE_BYTES; + __ocbp(v); v += L1_CACHE_BYTES; + __ocbp(v); v += L1_CACHE_BYTES; + __ocbp(v); v += L1_CACHE_BYTES; cnt -= 8; } while (cnt) { - asm volatile("ocbp @%0" : : "r" (v)); - v += L1_CACHE_BYTES; + __ocbp(v); v += L1_CACHE_BYTES; cnt--; } } @@ -101,28 +83,19 @@ static void sh4__flush_invalidate_region(void *start, int size) cnt = (end - v) / L1_CACHE_BYTES; while (cnt >= 8) { - asm volatile("ocbi @%0" : : 
"r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbi @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbi @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbi @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbi @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbi @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbi @%0" : : "r" (v)); - v += L1_CACHE_BYTES; - asm volatile("ocbi @%0" : : "r" (v)); - v += L1_CACHE_BYTES; + __ocbi(v); v += L1_CACHE_BYTES; + __ocbi(v); v += L1_CACHE_BYTES; + __ocbi(v); v += L1_CACHE_BYTES; + __ocbi(v); v += L1_CACHE_BYTES; + __ocbi(v); v += L1_CACHE_BYTES; + __ocbi(v); v += L1_CACHE_BYTES; + __ocbi(v); v += L1_CACHE_BYTES; + __ocbi(v); v += L1_CACHE_BYTES; cnt -= 8; } while (cnt) { - asm volatile("ocbi @%0" : : "r" (v)); - v += L1_CACHE_BYTES; + __ocbi(v); v += L1_CACHE_BYTES; cnt--; } } -- cgit v1.2.3-59-g8ed1b From 38f9ddf44150c1a213b41726384d055f7c35ec4f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 16 Aug 2009 03:35:26 +0900 Subject: sh: Merge the _32/_64 variants of arch/sh/kernel/Makefile. This uses the BITS export as per x86 in order to allow the same Makefile to be used. Signed-off-by: Paul Mundt --- arch/sh/kernel/Makefile | 43 +++++++++++++++++++++++++++++++++++++++---- arch/sh/kernel/Makefile_32 | 40 ---------------------------------------- arch/sh/kernel/Makefile_64 | 20 -------------------- 3 files changed, 39 insertions(+), 64 deletions(-) delete mode 100644 arch/sh/kernel/Makefile_32 delete mode 100644 arch/sh/kernel/Makefile_64 (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile index 349d833deab5..f37cf02ad9be 100644 --- a/arch/sh/kernel/Makefile +++ b/arch/sh/kernel/Makefile @@ -1,5 +1,40 @@ -ifeq ($(CONFIG_SUPERH32),y) -include ${srctree}/arch/sh/kernel/Makefile_32 -else -include ${srctree}/arch/sh/kernel/Makefile_64 +# +# Makefile for the Linux/SuperH kernel. +# + +extra-y := head_$(BITS).o init_task.o vmlinux.lds + +ifdef CONFIG_FUNCTION_TRACER +# Do not profile debug and lowlevel utilities +CFLAGS_REMOVE_ftrace.o = -pg endif + +obj-y := debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o \ + machvec.o process_$(BITS).o ptrace_$(BITS).o setup.o \ + signal_$(BITS).o sys_sh.o sys_sh$(BITS).o syscalls_$(BITS).o \ + time.o topology.o traps.o traps_$(BITS).o unwinder.o + +obj-y += cpu/ +obj-$(CONFIG_VSYSCALL) += vsyscall/ +obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o +obj-$(CONFIG_KGDB) += kgdb.o +obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o +obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o +obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o +obj-$(CONFIG_CRASH_DUMP) += crash_dump.o +obj-$(CONFIG_STACKTRACE) += stacktrace.o +obj-$(CONFIG_IO_TRAPPED) += io_trapped.o +obj-$(CONFIG_KPROBES) += kprobes.o +obj-$(CONFIG_GENERIC_GPIO) += gpio.o +obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o +obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o +obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o +obj-$(CONFIG_DUMP_CODE) += disassemble.o +obj-$(CONFIG_HIBERNATION) += swsusp.o +obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o + +obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o + +EXTRA_CFLAGS += -Werror diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 deleted file mode 100644 index f2245ebf0b31..000000000000 --- a/arch/sh/kernel/Makefile_32 +++ /dev/null @@ -1,40 +0,0 @@ -# -# Makefile for the Linux/SuperH kernel. 
-# - -extra-y := head_32.o init_task.o vmlinux.lds - -ifdef CONFIG_FUNCTION_TRACER -# Do not profile debug and lowlevel utilities -CFLAGS_REMOVE_ftrace.o = -pg -endif - -obj-y := debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o \ - machvec.o process_32.o ptrace_32.o setup.o signal_32.o \ - sys_sh.o sys_sh32.o syscalls_32.o time.o topology.o \ - traps.o traps_32.o unwinder.o - -obj-y += cpu/ -obj-$(CONFIG_VSYSCALL) += vsyscall/ -obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o -obj-$(CONFIG_KGDB) += kgdb.o -obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o -obj-$(CONFIG_MODULES) += sh_ksyms_32.o module.o -obj-$(CONFIG_EARLY_PRINTK) += early_printk.o -obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o -obj-$(CONFIG_CRASH_DUMP) += crash_dump.o -obj-$(CONFIG_STACKTRACE) += stacktrace.o -obj-$(CONFIG_IO_TRAPPED) += io_trapped.o -obj-$(CONFIG_KPROBES) += kprobes.o -obj-$(CONFIG_GENERIC_GPIO) += gpio.o -obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o -obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o -obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o -obj-$(CONFIG_DUMP_CODE) += disassemble.o -obj-$(CONFIG_HIBERNATION) += swsusp.o -obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o - -obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o - -EXTRA_CFLAGS += -Werror diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64 deleted file mode 100644 index 639ee514266c..000000000000 --- a/arch/sh/kernel/Makefile_64 +++ /dev/null @@ -1,20 +0,0 @@ -extra-y := head_64.o init_task.o vmlinux.lds - -obj-y := debugtraps.o idle.o io.o io_generic.o irq.o machvec.o process_64.o \ - ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \ - syscalls_64.o time.o topology.o traps.o traps_64.o unwinder.o - -obj-y += cpu/ -obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o -obj-$(CONFIG_MODULES) += sh_ksyms_64.o module.o -obj-$(CONFIG_EARLY_PRINTK) += early_printk.o -obj-$(CONFIG_CRASH_DUMP) += crash_dump.o -obj-$(CONFIG_STACKTRACE) += stacktrace.o -obj-$(CONFIG_IO_TRAPPED) += io_trapped.o -obj-$(CONFIG_GENERIC_GPIO) += gpio.o -obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o - -obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o - -EXTRA_CFLAGS += -Werror -- cgit v1.2.3-59-g8ed1b From b955873bf530ee4b80e6c8b734521ad07cbaed7e Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sat, 15 Aug 2009 23:10:57 +0100 Subject: sh: Try again at getting the initial return address for an unwind The previous hack for calculating the return address for the first frame we unwind (dwarf_unwinder_dump) didn't always work. The problem was that it assumed once it read the rule for calculating the return address, there would be no new rules for calculating it. This isn't true because the way in which the CFA is calculated can change as you progress through a function and the return address is figured out using the CFA. Therefore, the way to calculate the return address can change. So, instead of using some offset from the beginning of dwarf_unwind_stack which is just a flakey approach, and instead of executing instructions from the FDE until the return address is setup, we now figure out the pc in dwarf_unwind_stack() just before we call dwarf_cfa_execute_insns(). 
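The practical effect is that callers no longer special-case the innermost frame: passing pc == 0 and prev == NULL seeds the unwind from the current program counter, and each further call continues from the previous frame's saved return address. A rough caller-side sketch, assuming the dwarf_unwind_stack() signature below and a return_addr member in struct dwarf_frame as used by the sh stack dumping code (illustrative only, not a verbatim excerpt):

        struct dwarf_frame *frame;
        unsigned long ra;

        /* pc == 0 && prev == NULL: start from the current pc */
        frame = dwarf_unwind_stack(0, NULL);
        while (frame && frame->return_addr) {
                ra = frame->return_addr;
                printk("  [<%08lx>]\n", ra);    /* report this frame */
                frame = dwarf_unwind_stack(ra, frame);
        }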
Signed-off-by: Matt Fleming --- arch/sh/kernel/dwarf.c | 41 ++++++----------------------------------- 1 file changed, 6 insertions(+), 35 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index c6c5764a8ab1..44e674ed2871 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -330,7 +330,6 @@ struct dwarf_fde *dwarf_lookup_fde(unsigned long pc) * @fde: the FDE for this function * @frame: the instructions calculate the CFA for this frame * @pc: the program counter of the address we're interested in - * @define_ra: keep executing insns until the return addr reg is defined? * * Execute the Call Frame instruction sequence starting at * @insn_start and ending at @insn_end. The instructions describe @@ -342,36 +341,17 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, struct dwarf_cie *cie, struct dwarf_fde *fde, struct dwarf_frame *frame, - unsigned long pc, - bool define_ra) + unsigned long pc) { unsigned char insn; unsigned char *current_insn; unsigned int count, delta, reg, expr_len, offset; - bool seen_ra_reg; current_insn = insn_start; - /* - * If we're executing instructions for the dwarf_unwind_stack() - * FDE we need to keep executing instructions until the value of - * DWARF_ARCH_RA_REG is defined. See the comment in - * dwarf_unwind_stack() for more details. - */ - if (define_ra) - seen_ra_reg = false; - else - seen_ra_reg = true; - - while (current_insn < insn_end && (frame->pc <= pc || !seen_ra_reg) ) { + while (current_insn < insn_end && frame->pc <= pc) { insn = __raw_readb(current_insn++); - if (!seen_ra_reg) { - if (frame->num_regs >= DWARF_ARCH_RA_REG && - frame->regs[DWARF_ARCH_RA_REG].flags) - seen_ra_reg = true; - } - /* * Firstly, handle the opcodes that embed their operands * in the instructions. @@ -511,26 +491,17 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, struct dwarf_fde *fde; unsigned long addr; int i, offset; - bool define_ra = false; /* * If this is the first invocation of this recursive function we * need get the contents of a physical register to get the CFA * in order to begin the virtual unwinding of the stack. * - * Setting "define_ra" to true indictates that we want - * dwarf_cfa_execute_insns() to continue executing instructions - * until we know how to calculate the value of DWARF_ARCH_RA_REG - * (which we need in order to kick off the whole unwinding - * process). - * * NOTE: the return address is guaranteed to be setup by the * time this function makes its first function call. */ - if (!pc && !prev) { - pc = (unsigned long)&dwarf_unwind_stack; - define_ra = true; - } + if (!pc && !prev) + pc = (unsigned long)current_text_addr(); frame = kzalloc(sizeof(*frame), GFP_ATOMIC); if (!frame) @@ -566,11 +537,11 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, /* CIE initial instructions */ dwarf_cfa_execute_insns(cie->initial_instructions, cie->instructions_end, cie, fde, - frame, pc, false); + frame, pc); /* FDE instructions */ dwarf_cfa_execute_insns(fde->instructions, fde->end, cie, - fde, frame, pc, define_ra); + fde, frame, pc); /* Calculate the CFA */ switch (frame->flags) { -- cgit v1.2.3-59-g8ed1b From cd7246f0e2747bd2b43d25d0f63f05db182a62c0 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sun, 16 Aug 2009 01:44:33 +0100 Subject: sh: Add support for DWARF GNU extensions Also, remove the "fix" to DW_CFA_def_cfa_register where we reset the frame's cfa_offset to 0. 
This action is incorrect when handling DW_CFA_def_cfa_register as the DWARF spec specifically states that the previous contents of cfa_offset should be used with the new register. The reason that I thought cfa_offset should be reset to 0 was because it was being assigned a bogus value prior to executing the DW_CFA_def_cfa_register op. It turns out that the bogus cfa_offset value came from interpreting .cfi_escape pseudo-ops (those used by the GNU extensions) as CFA_DW_def_cfa ops. Signed-off-by: Matt Fleming --- arch/sh/include/asm/dwarf.h | 4 ++++ arch/sh/kernel/dwarf.c | 14 +++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h index 60b180728d8d..d3d3837c5e1b 100644 --- a/arch/sh/include/asm/dwarf.h +++ b/arch/sh/include/asm/dwarf.h @@ -340,6 +340,10 @@ struct dwarf_stack { #define DW_CFA_lo_user 0x1c #define DW_CFA_hi_user 0x3f +/* GNU extension opcodes */ +#define DW_CFA_GNU_args_size 0x2e +#define DW_CFA_GNU_negative_offset_extended 0x2f + /* * Some call frame instructions encode their operands in the opcode. We * need some helper functions to extract both the opcode and operands diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 44e674ed2871..5fd6e604816d 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -429,7 +429,6 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, count = dwarf_read_uleb128(current_insn, &frame->cfa_register); current_insn += count; - frame->cfa_offset = 0; frame->flags |= DWARF_FRAME_CFA_REG_OFFSET; break; case DW_CFA_def_cfa_offset: @@ -465,6 +464,19 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, frame->regs[reg].flags |= DWARF_REG_OFFSET; frame->regs[reg].addr = offset; break; + case DW_CFA_GNU_args_size: + count = dwarf_read_uleb128(current_insn, &offset); + current_insn += count; + break; + case DW_CFA_GNU_negative_offset_extended: + count = dwarf_read_uleb128(current_insn, ®); + current_insn += count; + count = dwarf_read_uleb128(current_insn, &offset); + offset *= cie->data_alignment_factor; + dwarf_frame_alloc_regs(frame, reg); + frame->regs[reg].flags |= DWARF_REG_OFFSET; + frame->regs[reg].addr = -offset; + break; default: pr_debug("unhandled DWARF instruction 0x%x\n", insn); break; -- cgit v1.2.3-59-g8ed1b From 97f361e2498ada54b48a235619eaf5af8e46427e Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 17 Aug 2009 05:07:38 +0900 Subject: sh: unwinder: Move initialization to early_initcall() and tidy up locking. This moves the initialization over to an early_initcall(). This fixes up some lockdep interaction issues. At the same time, kill off some superfluous locking in the init path. 
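Schematically, the unwinder now self-registers through the initcall machinery rather than relying on init_IRQ() calling it at the right moment; a condensed sketch of the resulting pattern (the full details are in the dwarf.c hunk below):

        /* arch/sh/kernel/dwarf.c */
        static int __init dwarf_unwinder_init(void)
        {
                /* parse .eh_frame into the CIE/FDE lists and register
                 * the unwinder; see the complete function below. */
                return 0;
        }
        early_initcall(dwarf_unwinder_init);

init_IRQ() correspondingly drops its explicit dwarf_unwinder_init() call, as the arch/sh/kernel/irq.c hunk shows.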
Signed-off-by: Paul Mundt --- arch/sh/include/asm/dwarf.h | 2 -- arch/sh/kernel/dwarf.c | 32 +++++++++++++++----------------- arch/sh/kernel/irq.c | 4 ---- 3 files changed, 15 insertions(+), 23 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h index d3d3837c5e1b..2fbe8720411e 100644 --- a/arch/sh/include/asm/dwarf.h +++ b/arch/sh/include/asm/dwarf.h @@ -370,8 +370,6 @@ static inline unsigned int DW_CFA_operand(unsigned long insn) #define DW_EXT_HI 0xffffffff #define DW_EXT_DWARF64 DW_EXT_HI -extern void dwarf_unwinder_init(void); - extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, struct dwarf_frame *); #endif /* __ASSEMBLY__ */ diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 5fd6e604816d..d0652153f576 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -26,10 +26,10 @@ #include static LIST_HEAD(dwarf_cie_list); -DEFINE_SPINLOCK(dwarf_cie_lock); +static DEFINE_SPINLOCK(dwarf_cie_lock); static LIST_HEAD(dwarf_fde_list); -DEFINE_SPINLOCK(dwarf_fde_lock); +static DEFINE_SPINLOCK(dwarf_fde_lock); static struct dwarf_cie *cached_cie; @@ -264,7 +264,7 @@ static inline int dwarf_entry_len(char *addr, unsigned long *len) */ static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr) { - struct dwarf_cie *cie, *n; + struct dwarf_cie *cie; unsigned long flags; spin_lock_irqsave(&dwarf_cie_lock, flags); @@ -278,7 +278,7 @@ static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr) goto out; } - list_for_each_entry_safe(cie, n, &dwarf_cie_list, link) { + list_for_each_entry(cie, &dwarf_cie_list, link) { if (cie->cie_pointer == cie_ptr) { cached_cie = cie; break; @@ -299,11 +299,12 @@ out: */ struct dwarf_fde *dwarf_lookup_fde(unsigned long pc) { + struct dwarf_fde *fde; unsigned long flags; - struct dwarf_fde *fde, *n; spin_lock_irqsave(&dwarf_fde_lock, flags); - list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) { + + list_for_each_entry(fde, &dwarf_fde_list, link) { unsigned long start, end; start = fde->initial_location; @@ -787,24 +788,19 @@ static struct unwinder dwarf_unwinder = { static void dwarf_unwinder_cleanup(void) { - struct dwarf_cie *cie, *m; - struct dwarf_fde *fde, *n; - unsigned long flags; + struct dwarf_cie *cie; + struct dwarf_fde *fde; /* * Deallocate all the memory allocated for the DWARF unwinder. * Traverse all the FDE/CIE lists and remove and free all the * memory associated with those data structures. */ - spin_lock_irqsave(&dwarf_cie_lock, flags); - list_for_each_entry_safe(cie, m, &dwarf_cie_list, link) + list_for_each_entry(cie, &dwarf_cie_list, link) kfree(cie); - spin_unlock_irqrestore(&dwarf_cie_lock, flags); - spin_lock_irqsave(&dwarf_fde_lock, flags); - list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) + list_for_each_entry(fde, &dwarf_fde_list, link) kfree(fde); - spin_unlock_irqrestore(&dwarf_fde_lock, flags); } /** @@ -816,7 +812,7 @@ static void dwarf_unwinder_cleanup(void) * easy to lookup the FDE for a given PC, so we build a list of FDE * and CIE entries that make it easier. 
*/ -void dwarf_unwinder_init(void) +static int __init dwarf_unwinder_init(void) { u32 entry_type; void *p, *entry; @@ -877,9 +873,11 @@ void dwarf_unwinder_init(void) if (err) goto out; - return; + return 0; out: printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err); dwarf_unwinder_cleanup(); + return -EINVAL; } +early_initcall(dwarf_unwinder_init); diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 2bb43dc74f22..278c68c60488 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include @@ -262,9 +261,6 @@ void __init init_IRQ(void) sh_mv.mv_init_irq(); irq_ctx_init(smp_processor_id()); - - /* This needs to be early, but not too early.. */ - dwarf_unwinder_init(); } #ifdef CONFIG_SPARSE_IRQ -- cgit v1.2.3-59-g8ed1b From 309214af53f030e0664aea011a9f628c5eb9cc2d Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Mon, 17 Aug 2009 09:27:29 +0000 Subject: sh: rework SuperH Mobile sleep code exception handling This patch updates the exception handling in the sleep code for SuperH Mobile. With the patch applied the sleep code always rewrites the VBR and resumes from the exception vector, re-initializes hardware and jumps straight to the original interrupt vector. Tested on sh7722 and sh7724 with "Sleep Mode", "Sleep Mode + SF" and "Software Standby Mode + SF" with CONFIG_SUSPEND. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/shmobile/pm.c | 16 +--- arch/sh/kernel/cpu/shmobile/sleep.S | 155 ++++++++++++++++++++++++------------ 2 files changed, 107 insertions(+), 64 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c index de078d24ce56..ee3c2aaf66fb 100644 --- a/arch/sh/kernel/cpu/shmobile/pm.c +++ b/arch/sh/kernel/cpu/shmobile/pm.c @@ -41,23 +41,11 @@ extern const unsigned int sh_mobile_standby_size; void sh_mobile_call_standby(unsigned long mode) { - extern void *vbr_base; void *onchip_mem = (void *)ILRAM_BASE; - void (*standby_onchip_mem)(unsigned long) = onchip_mem; - - /* Note: Wake up from sleep may generate exceptions! - * Setup VBR to point to on-chip ram if self-refresh is - * going to be used. - */ - if (mode & SUSP_SH_SF) - asm volatile("ldc %0, vbr" : : "r" (onchip_mem) : "memory"); + void (*standby_onchip_mem)(unsigned long, unsigned long) = onchip_mem; /* Let assembly snippet in on-chip memory handle the rest */ - standby_onchip_mem(mode); - - /* Put VBR back in System RAM again */ - if (mode & SUSP_SH_SF) - asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory"); + standby_onchip_mem(mode, ILRAM_BASE); } static int sh_pm_enter(suspend_state_t state) diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S index baf2d7d46b05..a439e6c7824f 100644 --- a/arch/sh/kernel/cpu/shmobile/sleep.S +++ b/arch/sh/kernel/cpu/shmobile/sleep.S @@ -16,19 +16,52 @@ #include #include +/* + * Kernel mode register usage, see entry.S: + * k0 scratch + * k1 scratch + * k4 scratch + */ +#define k0 r0 +#define k1 r1 +#define k4 r4 + /* manage self-refresh and enter standby mode. * this code will be copied to on-chip memory and executed from there. 
*/ .balign 4096,0,4096 ENTRY(sh_mobile_standby) + + /* save original vbr */ + stc vbr, r1 + mova saved_vbr, r0 + mov.l r1, @r0 + + /* point vbr to our on-chip memory page */ + ldc r5, vbr + + /* save return address */ + mova saved_spc, r0 + sts pr, r5 + mov.l r5, @r0 + + /* save sr */ + mova saved_sr, r0 + stc sr, r5 + mov.l r5, @r0 + + /* save mode flags */ + mova saved_mode, r0 + mov.l r4, @r0 + + /* put mode flags in r0 */ mov r4, r0 tst #SUSP_SH_SF, r0 bt skip_set_sf #ifdef CONFIG_CPU_SUBTYPE_SH7724 /* DBSC: put memory in self-refresh mode */ - mov.l dben_reg, r4 mov.l dben_data0, r1 mov.l r1, @r4 @@ -60,14 +93,6 @@ ENTRY(sh_mobile_standby) #endif skip_set_sf: - tst #SUSP_SH_SLEEP, r0 - bt test_standby - - /* set mode to "sleep mode" */ - bra do_sleep - mov #0x00, r1 - -test_standby: tst #SUSP_SH_STANDBY, r0 bt test_rstandby @@ -85,77 +110,107 @@ test_rstandby: test_ustandby: tst #SUSP_SH_USTANDBY, r0 - bt done_sleep + bt force_sleep /* set mode to "u-standby mode" */ - mov #0x10, r1 + bra do_sleep + mov #0x10, r1 - /* fall-through */ +force_sleep: + + /* set mode to "sleep mode" */ + mov #0x00, r1 do_sleep: /* setup and enter selected standby mode */ mov.l 5f, r4 mov.l r1, @r4 +again: sleep + bra again + nop + +restore_jump_vbr: + /* setup spc with return address to c code */ + mov.l saved_spc, k0 + ldc k0, spc + + /* restore vbr */ + mov.l saved_vbr, k0 + ldc k0, vbr + + /* setup ssr with saved sr */ + mov.l saved_sr, k0 + ldc k0, ssr + + /* get mode flags */ + mov.l saved_mode, k0 done_sleep: /* reset standby mode to sleep mode */ - mov.l 5f, r4 - mov #0x00, r1 - mov.l r1, @r4 + mov.l 5f, k4 + mov #0x00, k1 + mov.l k1, @k4 - tst #SUSP_SH_SF, r0 + tst #SUSP_SH_SF, k0 bt skip_restore_sf #ifdef CONFIG_CPU_SUBTYPE_SH7724 /* DBSC: put memory in auto-refresh mode */ + mov.l dbrfpdn0_reg, k4 + mov.l dbrfpdn0_data0, k1 + mov.l k1, @k4 - mov.l dbrfpdn0_reg, r4 - mov.l dbrfpdn0_data0, r1 - mov.l r1, @r4 - - /* sleep 140 ns */ - nop + nop /* sleep 140 ns */ nop nop nop - mov.l dbcmdcnt_reg, r4 - mov.l dbcmdcnt_data0, r1 - mov.l r1, @r4 + mov.l dbcmdcnt_reg, k4 + mov.l dbcmdcnt_data0, k1 + mov.l k1, @k4 - mov.l dbcmdcnt_reg, r4 - mov.l dbcmdcnt_data1, r1 - mov.l r1, @r4 + mov.l dbcmdcnt_reg, k4 + mov.l dbcmdcnt_data1, k1 + mov.l k1, @k4 - mov.l dben_reg, r4 - mov.l dben_data1, r1 - mov.l r1, @r4 + mov.l dben_reg, k4 + mov.l dben_data1, k1 + mov.l k1, @k4 - mov.l dbrfpdn0_reg, r4 - mov.l dbrfpdn0_data2, r1 - mov.l r1, @r4 + mov.l dbrfpdn0_reg, k4 + mov.l dbrfpdn0_data2, k1 + mov.l k1, @k4 #else /* SBSC: set auto-refresh mode */ - mov.l 1f, r4 - mov.l @r4, r2 - mov.l 4f, r3 - and r3, r2 - mov.l r2, @r4 - mov.l 6f, r4 - mov.l 7f, r1 - mov.l 8f, r2 - mov.l @r4, r3 - mov #-1, r4 - add r4, r3 - or r2, r3 - mov.l r3, @r1 + mov.l 1f, k4 + mov.l @k4, k0 + mov.l 4f, k1 + and k1, k0 + mov.l k0, @k4 + mov.l 6f, k4 + mov.l 8f, k0 + mov.l @k4, k1 + mov #-1, k4 + add k4, k1 + or k1, k0 + mov.l 7f, k1 + mov.l k0, @k1 #endif skip_restore_sf: - rts + /* jump to vbr vector */ + mov.l saved_vbr, k0 + mov.l offset_vbr, k4 + add k4, k0 + jmp @k0 nop .balign 4 +saved_mode: .long 0 +saved_spc: .long 0 +saved_sr: .long 0 +saved_vbr: .long 0 +offset_vbr: .long 0x600 #ifdef CONFIG_CPU_SUBTYPE_SH7724 dben_reg: .long 0xfd000010 /* DBEN */ dben_data0: .long 0 @@ -178,12 +233,12 @@ dbcmdcnt_data1: .long 4 7: .long 0xfe400018 /* RTCNT */ 8: .long 0xa55a0000 + /* interrupt vector @ 0x600 */ .balign 0x400,0,0x400 .long 0xdeadbeef .balign 0x200,0,0x200 - /* sh7722 will end up here in sleep mode */ - rte + bra restore_jump_vbr 
nop sh_mobile_standby_end: -- cgit v1.2.3-59-g8ed1b From 63cd91dfd4976cb9687b07e9a9864912631d2e08 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Mon, 17 Aug 2009 09:41:40 +0000 Subject: sh: SuperH Mobile Software Standby support for cpuidle This patch adds "SuperH Mobile Standby Mode [SF]" to the list of cpuidle sleep modes. If the software latency requirements from cpuidle are met and the hardware requirements are fulfilled, then deep sleep modes can be entered. Tested on sh7722 and sh7724 with "Sleep Mode", "Sleep Mode + SF" and "Software Standby Mode + SF" together with a multimedia workload and flood ping without packet drop. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/shmobile/cpuidle.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c index 4afdd975cc66..1c504bd972c3 100644 --- a/arch/sh/kernel/cpu/shmobile/cpuidle.c +++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c @@ -21,6 +21,7 @@ static unsigned long cpuidle_mode[] = { SUSP_SH_SLEEP, /* regular sleep mode */ SUSP_SH_SLEEP | SUSP_SH_SF, /* sleep mode + self refresh */ + SUSP_SH_STANDBY | SUSP_SH_SF, /* software standby mode + self refresh */ }; static int cpuidle_sleep_enter(struct cpuidle_device *dev, @@ -96,6 +97,16 @@ void sh_mobile_setup_cpuidle(void) state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = cpuidle_sleep_enter; + state = &dev->states[i++]; + snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); + strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", CPUIDLE_DESC_LEN); + state->exit_latency = 2300; + state->target_residency = 1 * 2; + state->power_usage = 1; + state->flags = 0; + state->flags |= CPUIDLE_FLAG_TIME_VALID; + state->enter = cpuidle_sleep_enter; + dev->state_count = i; cpuidle_register_device(dev); -- cgit v1.2.3-59-g8ed1b From f3a8308864f920d2babe76921733d76ec4a11fb8 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 18 Aug 2009 11:35:09 +0900 Subject: sh: Add a few missing irqflags tracing markers. save_regs contains an SR modification without an irqflags annotation, which resulted in a missing TRACE_IRQS_OFF in the interrupt exception path on SH-3/SH4. I've also moved the TRACE_IRQS_OFF/ON annotation when returning from the interrupt to just before we call __restore_all. This seems like the most logical place to put this because the annotation is for when we restore the SR register, so we should delay the annotation until as late as possible. We were also missing a TRACE_IRQS_OFF in resume_kernel when CONFIG_PREEMPT is enabled. The end result is that this fixes up the lockdep engine debugging support with CONFIG_PREEMPT enabled on all SH-3/4 parts. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/entry.S | 8 ++++++++ arch/sh/kernel/entry-common.S | 27 ++++++++++++++++----------- 2 files changed, 24 insertions(+), 11 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index 67ad6467c694..272636ec5c98 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -508,6 +508,14 @@ ENTRY(handle_interrupt) bsr save_regs ! needs original pr value in k3 mov #-1, k2 ! default vector kept in k2 + stc sr, r0 ! get status register + shlr2 r0 + and #0x3c, r0 + cmp/eq #0x3c, r0 + bf 9f + TRACE_IRQS_OFF +9: + ! Setup return address and jump to do_IRQ mov.l 4f, r9 ! fetch return address lds r9, pr !
put return address in pr diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index e63178fefb9b..700477601c6f 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -77,15 +77,6 @@ ENTRY(ret_from_irq) ! mov #OFF_SR, r0 mov.l @(r0,r15), r0 ! get status register - - shlr2 r0 - and #0x3c, r0 - cmp/eq #0x3c, r0 - bt 9f - TRACE_IRQS_ON -9: - mov #OFF_SR, r0 - mov.l @(r0,r15), r0 ! get status register shll r0 shll r0 ! kernel space? get_current_thread_info r8, r0 @@ -96,6 +87,7 @@ ENTRY(ret_from_irq) nop ENTRY(resume_kernel) cli + TRACE_IRQS_OFF mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count tst r0, r0 bf noresched @@ -213,12 +205,25 @@ syscall_trace_entry: mov.l r0, @(OFF_R0,r15) ! Return value __restore_all: - mov.l 1f, r0 + mov #OFF_SR, r0 + mov.l @(r0,r15), r0 ! get status register + + shlr2 r0 + and #0x3c, r0 + cmp/eq #0x3c, r0 + bt 1f + TRACE_IRQS_ON + bra 2f + nop +1: + TRACE_IRQS_OFF +2: + mov.l 3f, r0 jmp @r0 nop .align 2 -1: .long restore_all +3: .long restore_all .align 2 syscall_badsys: ! Bad syscall number -- cgit v1.2.3-59-g8ed1b From b2ea8b421515ddd692c88fc5afb0e7f93e96e6cb Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Tue, 18 Aug 2009 06:51:47 +0000 Subject: sh: clkfwk: Support additional IFC divisor on SH7724. This patch updates the FRQCRA.IFC divisor values for SH7724. Despite not being initially documented, the / 3 mode is also supported for the IFC division. Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index 34611d97378e..627588dfddf0 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c @@ -127,7 +127,7 @@ struct clk *main_clks[] = { &div3_clk, }; -static int divisors[] = { 2, 0, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 }; +static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 }; static struct clk_div_mult_table div4_table = { .divisors = divisors, -- cgit v1.2.3-59-g8ed1b From fc6191dd301e3bc8cbb142aab1a2ec4e699cb2de Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 19 Aug 2009 17:53:04 +0900 Subject: sh: Fix up clockevents broadcasting. This fixes up the clockevents broadcasting code as detailed in commit ee348d5a1d810bc9958cabb7c27302aab235d36e ("[ARM] realview: fix broadcast tick support"). This saves us from having to do strange ordering tricks with the broadcast clockevent device, relying on the rating instead.
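In concrete terms, the rating-based approach registers a dummy per-CPU clock_event_device whose rating outranks the real broadcast source, as the localtimer.c hunk further below does. Here is a minimal standalone sketch of that pattern, not taken verbatim from the tree: the sketch_* names are made up for illustration, smp_timer_broadcast is assumed to be the platform's broadcast hook, and the field values simply mirror the ones used in the patch.

#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct clock_event_device, sketch_clockevent);

/* Assumed to exist elsewhere: the platform's IPI-based broadcast hook. */
extern void smp_timer_broadcast(const struct cpumask *mask);

static void sketch_set_mode(enum clock_event_mode mode,
			    struct clock_event_device *clk)
{
	/* Nothing to program locally; the real event arrives via broadcast. */
}

void __cpuinit sketch_local_timer_setup(unsigned int cpu)
{
	struct clock_event_device *clk = &per_cpu(sketch_clockevent, cpu);

	clk->name	= "dummy_timer";
	clk->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	clk->rating	= 400;	/* outranks the broadcast source, so the core prefers it per-CPU */
	clk->mult	= 1;
	clk->set_mode	= sketch_set_mode;
	clk->broadcast	= smp_timer_broadcast;
	clk->cpumask	= cpumask_of(cpu);

	clockevents_register_device(clk);
}

Because selection is driven by the rating rather than by registration order, the dummy device can be registered whenever the secondary CPUs come up, without any special sequencing against the broadcast clockevent.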
Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/smp-shx3.c | 2 ++ arch/sh/kernel/localtimer.c | 9 +++++++-- arch/sh/kernel/time.c | 4 ---- 3 files changed, 9 insertions(+), 6 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c index 2b6b0d50c576..185ec3976a25 100644 --- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c @@ -57,6 +57,8 @@ void __init plat_prepare_cpus(unsigned int max_cpus) { int i; + local_timer_setup(0); + BUILD_BUG_ON(SMP_MSG_NR >= 8); for (i = 0; i < SMP_MSG_NR; i++) diff --git a/arch/sh/kernel/localtimer.c b/arch/sh/kernel/localtimer.c index 96e8eaea1e62..0b04e7d4a9b9 100644 --- a/arch/sh/kernel/localtimer.c +++ b/arch/sh/kernel/localtimer.c @@ -22,6 +22,7 @@ #include #include #include +#include #include static DEFINE_PER_CPU(struct clock_event_device, local_clockevent); @@ -33,7 +34,9 @@ void local_timer_interrupt(void) { struct clock_event_device *clk = &__get_cpu_var(local_clockevent); + irq_enter(); clk->event_handler(clk); + irq_exit(); } static void dummy_timer_set_mode(enum clock_event_mode mode, @@ -46,8 +49,10 @@ void __cpuinit local_timer_setup(unsigned int cpu) struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); clk->name = "dummy_timer"; - clk->features = CLOCK_EVT_FEAT_DUMMY; - clk->rating = 200; + clk->features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_DUMMY; + clk->rating = 400; clk->mult = 1; clk->set_mode = dummy_timer_set_mode; clk->broadcast = smp_timer_broadcast; diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c index 7f95f479060f..632aff533285 100644 --- a/arch/sh/kernel/time.c +++ b/arch/sh/kernel/time.c @@ -119,9 +119,5 @@ void __init time_init(void) set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); -#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST - local_timer_setup(smp_processor_id()); -#endif - late_time_init = sh_late_time_init; } -- cgit v1.2.3-59-g8ed1b From 307646c958f0f3c3624368eaa72dce8567b25f93 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Thu, 20 Aug 2009 12:07:46 +0900 Subject: sh: Fix up the CONFIG_FTRACE_SYSCALLS=n build. -tip can't be bothered keeping interfaces stable long enough for anyone to use them without having their builds broken without notification, so just ifdef around the problematic symbols until the new interfaces become available upstream. 
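The guards in the hunks that follow compile the tracer hooks out when CONFIG_FTRACE_SYSCALLS is not set. For comparison only, an equivalent stub-header idiom is sketched below; the two prototypes mirror the symbols used at the call sites in ptrace_32.c, and the rest is illustrative, not something the patch adds.

#ifdef CONFIG_FTRACE_SYSCALLS
extern void ftrace_syscall_enter(struct pt_regs *regs);
extern void ftrace_syscall_exit(struct pt_regs *regs);
#else
/* No-op stubs so callers would need no #ifdef of their own (illustrative). */
static inline void ftrace_syscall_enter(struct pt_regs *regs) { }
static inline void ftrace_syscall_exit(struct pt_regs *regs) { }
#endif

Stubs like these would keep the call sites unguarded, but they require touching a shared header; guarding the call sites directly keeps the change local to the architecture code.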
Signed-off-by: Paul Mundt --- arch/sh/kernel/ptrace_32.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index c198eceaee94..f4f58e438a26 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -461,8 +461,10 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) */ ret = -1L; +#ifdef CONFIG_FTRACE_SYSCALLS if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) ftrace_syscall_enter(regs); +#endif if (unlikely(current->audit_context)) audit_syscall_entry(audit_arch(), regs->regs[3], @@ -480,8 +482,10 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]), regs->regs[0]); +#ifdef CONFIG_FTRACE_SYSCALLS if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) ftrace_syscall_exit(regs); +#endif step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) -- cgit v1.2.3-59-g8ed1b From c01f0f1a4a96eb3acc5850e18cc43f24366966d0 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Fri, 21 Aug 2009 16:30:28 +0900 Subject: sh: Add initial support for SH7757 CPU subtype Signed-off-by: Yoshihiro Shimoda Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 7 + arch/sh/Kconfig.debug | 1 + arch/sh/include/asm/processor.h | 2 +- arch/sh/include/cpu-sh4/cpu/freq.h | 4 + arch/sh/include/cpu-sh4/cpu/sh7757.h | 243 ++++ arch/sh/kernel/cpu/sh4/probe.c | 11 +- arch/sh/kernel/cpu/sh4a/Makefile | 3 + arch/sh/kernel/cpu/sh4a/clock-sh7757.c | 130 ++ arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c | 2019 +++++++++++++++++++++++++++++++ arch/sh/kernel/cpu/sh4a/setup-sh7757.c | 513 ++++++++ arch/sh/kernel/setup.c | 2 +- arch/sh/mm/Kconfig | 7 +- drivers/serial/sh-sci.c | 3 +- drivers/serial/sh-sci.h | 17 + 14 files changed, 2954 insertions(+), 8 deletions(-) create mode 100644 arch/sh/include/cpu-sh4/cpu/sh7757.h create mode 100644 arch/sh/kernel/cpu/sh4a/clock-sh7757.c create mode 100644 arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c create mode 100644 arch/sh/kernel/cpu/sh4a/setup-sh7757.c (limited to 'arch/sh/kernel') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index c4a955d25451..d17570c61dfd 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -396,6 +396,13 @@ config CPU_SUBTYPE_SH7724 help Select SH7724 if you have an SH-MobileR2R CPU. +config CPU_SUBTYPE_SH7757 + bool "Support SH7757 processor" + select CPU_SH4A + select CPU_SHX2 + help + Select SH7757 if you have a SH4A SH7757 CPU. 
+ config CPU_SUBTYPE_SH7763 bool "Support SH7763 processor" select CPU_SH4A diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index 741d20fab2e1..55907af1dc25 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug @@ -38,6 +38,7 @@ config EARLY_SCIF_CONSOLE_PORT default "0xffe00000" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7763 || \ CPU_SUBTYPE_SH7722 || CPU_SUBTYPE_SH7366 || \ CPU_SUBTYPE_SH7343 + default "0xfe4c0000" if CPU_SUBTYPE_SH7757 default "0xffeb0000" if CPU_SUBTYPE_SH7785 default "0xffeb0000" if CPU_SUBTYPE_SH7786 default "0xfffe8000" if CPU_SUBTYPE_SH7203 diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index ff7daaf9a620..9d87868bc53d 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h @@ -32,7 +32,7 @@ enum cpu_type { /* SH-4A types */ CPU_SH7763, CPU_SH7770, CPU_SH7780, CPU_SH7781, CPU_SH7785, CPU_SH7786, - CPU_SH7723, CPU_SH7724, CPU_SHX3, + CPU_SH7723, CPU_SH7724, CPU_SH7757, CPU_SHX3, /* SH4AL-DSP types */ CPU_SH7343, CPU_SH7722, CPU_SH7366, diff --git a/arch/sh/include/cpu-sh4/cpu/freq.h b/arch/sh/include/cpu-sh4/cpu/freq.h index ccf1d999db6d..e1e90960ee9a 100644 --- a/arch/sh/include/cpu-sh4/cpu/freq.h +++ b/arch/sh/include/cpu-sh4/cpu/freq.h @@ -22,6 +22,10 @@ #define MSTPCR0 0xa4150030 #define MSTPCR1 0xa4150034 #define MSTPCR2 0xa4150038 +#elif defined(CONFIG_CPU_SUBTYPE_SH7757) +#define FRQCR 0xffc80000 +#define OSCCR 0xffc80018 +#define PLLCR 0xffc80024 #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) #define FRQCR 0xffc80000 diff --git a/arch/sh/include/cpu-sh4/cpu/sh7757.h b/arch/sh/include/cpu-sh4/cpu/sh7757.h new file mode 100644 index 000000000000..f4d267efad71 --- /dev/null +++ b/arch/sh/include/cpu-sh4/cpu/sh7757.h @@ -0,0 +1,243 @@ +#ifndef __ASM_SH7757_H__ +#define __ASM_SH7757_H__ + +enum { + /* PTA */ + GPIO_PTA7, GPIO_PTA6, GPIO_PTA5, GPIO_PTA4, + GPIO_PTA3, GPIO_PTA2, GPIO_PTA1, GPIO_PTA0, + + /* PTB */ + GPIO_PTB7, GPIO_PTB6, GPIO_PTB5, GPIO_PTB4, + GPIO_PTB3, GPIO_PTB2, GPIO_PTB1, GPIO_PTB0, + + /* PTC */ + GPIO_PTC7, GPIO_PTC6, GPIO_PTC5, GPIO_PTC4, + GPIO_PTC3, GPIO_PTC2, GPIO_PTC1, GPIO_PTC0, + + /* PTD */ + GPIO_PTD7, GPIO_PTD6, GPIO_PTD5, GPIO_PTD4, + GPIO_PTD3, GPIO_PTD2, GPIO_PTD1, GPIO_PTD0, + + /* PTE */ + GPIO_PTE7, GPIO_PTE6, GPIO_PTE5, GPIO_PTE4, + GPIO_PTE3, GPIO_PTE2, GPIO_PTE1, GPIO_PTE0, + + /* PTF */ + GPIO_PTF7, GPIO_PTF6, GPIO_PTF5, GPIO_PTF4, + GPIO_PTF3, GPIO_PTF2, GPIO_PTF1, GPIO_PTF0, + + /* PTG */ + GPIO_PTG7, GPIO_PTG6, GPIO_PTG5, GPIO_PTG4, + GPIO_PTG3, GPIO_PTG2, GPIO_PTG1, GPIO_PTG0, + + /* PTH */ + GPIO_PTH7, GPIO_PTH6, GPIO_PTH5, GPIO_PTH4, + GPIO_PTH3, GPIO_PTH2, GPIO_PTH1, GPIO_PTH0, + + /* PTI */ + GPIO_PTI7, GPIO_PTI6, GPIO_PTI5, GPIO_PTI4, + GPIO_PTI3, GPIO_PTI2, GPIO_PTI1, GPIO_PTI0, + + /* PTJ */ + GPIO_PTJ7, GPIO_PTJ6, GPIO_PTJ5, GPIO_PTJ4, + GPIO_PTJ3, GPIO_PTJ2, GPIO_PTJ1, GPIO_PTJ0, + + /* PTK */ + GPIO_PTK7, GPIO_PTK6, GPIO_PTK5, GPIO_PTK4, + GPIO_PTK3, GPIO_PTK2, GPIO_PTK1, GPIO_PTK0, + + /* PTL */ + GPIO_PTL7, GPIO_PTL6, GPIO_PTL5, GPIO_PTL4, + GPIO_PTL3, GPIO_PTL2, GPIO_PTL1, GPIO_PTL0, + + /* PTM */ + GPIO_PTM6, GPIO_PTM5, GPIO_PTM4, + GPIO_PTM3, GPIO_PTM2, GPIO_PTM1, GPIO_PTM0, + + /* PTN */ + GPIO_PTN7, GPIO_PTN6, GPIO_PTN5, GPIO_PTN4, + GPIO_PTN3, GPIO_PTN2, GPIO_PTN1, GPIO_PTN0, + + /* PTO */ + GPIO_PTO7, GPIO_PTO6, GPIO_PTO5, GPIO_PTO4, + GPIO_PTO3, GPIO_PTO2, GPIO_PTO1, GPIO_PTO0, + + /* PTP */ + GPIO_PTP6, GPIO_PTP5, GPIO_PTP4, + GPIO_PTP3, GPIO_PTP2, GPIO_PTP1, GPIO_PTP0, + + /* PTQ */ + 
GPIO_PTQ6, GPIO_PTQ5, GPIO_PTQ4, + GPIO_PTQ3, GPIO_PTQ2, GPIO_PTQ1, GPIO_PTQ0, + + /* PTR */ + GPIO_PTR7, GPIO_PTR6, GPIO_PTR5, GPIO_PTR4, + GPIO_PTR3, GPIO_PTR2, GPIO_PTR1, GPIO_PTR0, + + /* PTS */ + GPIO_PTS7, GPIO_PTS6, GPIO_PTS5, GPIO_PTS4, + GPIO_PTS3, GPIO_PTS2, GPIO_PTS1, GPIO_PTS0, + + /* PTT */ + GPIO_PTT5, GPIO_PTT4, + GPIO_PTT3, GPIO_PTT2, GPIO_PTT1, GPIO_PTT0, + + /* PTU */ + GPIO_PTU7, GPIO_PTU6, GPIO_PTU5, GPIO_PTU4, + GPIO_PTU3, GPIO_PTU2, GPIO_PTU1, GPIO_PTU0, + + /* PTV */ + GPIO_PTV7, GPIO_PTV6, GPIO_PTV5, GPIO_PTV4, + GPIO_PTV3, GPIO_PTV2, GPIO_PTV1, GPIO_PTV0, + + /* PTW */ + GPIO_PTW7, GPIO_PTW6, GPIO_PTW5, GPIO_PTW4, + GPIO_PTW3, GPIO_PTW2, GPIO_PTW1, GPIO_PTW0, + + /* PTX */ + GPIO_PTX7, GPIO_PTX6, GPIO_PTX5, GPIO_PTX4, + GPIO_PTX3, GPIO_PTX2, GPIO_PTX1, GPIO_PTX0, + + /* PTY */ + GPIO_PTY7, GPIO_PTY6, GPIO_PTY5, GPIO_PTY4, + GPIO_PTY3, GPIO_PTY2, GPIO_PTY1, GPIO_PTY0, + + /* PTZ */ + GPIO_PTZ7, GPIO_PTZ6, GPIO_PTZ5, GPIO_PTZ4, + GPIO_PTZ3, GPIO_PTZ2, GPIO_PTZ1, GPIO_PTZ0, + + + /* PTA (mobule: LBSC, CPG, LPC) */ + GPIO_FN_BS, GPIO_FN_RDWR, GPIO_FN_WE1, GPIO_FN_RDY, + GPIO_FN_MD10, GPIO_FN_MD9, GPIO_FN_MD8, + GPIO_FN_LGPIO7, GPIO_FN_LGPIO6, GPIO_FN_LGPIO5, GPIO_FN_LGPIO4, + GPIO_FN_LGPIO3, GPIO_FN_LGPIO2, GPIO_FN_LGPIO1, GPIO_FN_LGPIO0, + + /* PTB (mobule: LBSC, EtherC, SIM, LPC) */ + GPIO_FN_D15, GPIO_FN_D14, GPIO_FN_D13, GPIO_FN_D12, + GPIO_FN_D11, GPIO_FN_D10, GPIO_FN_D9, GPIO_FN_D8, + GPIO_FN_ET0_MDC, GPIO_FN_ET0_MDIO, + GPIO_FN_ET1_MDC, GPIO_FN_ET1_MDIO, + GPIO_FN_SIM_D, GPIO_FN_SIM_CLK, GPIO_FN_SIM_RST, + GPIO_FN_WPSZ1, GPIO_FN_WPSZ0, GPIO_FN_FWID, GPIO_FN_FLSHSZ, + GPIO_FN_LPC_SPIEN, GPIO_FN_BASEL, + + /* PTC (mobule: SD) */ + GPIO_FN_SD_WP, GPIO_FN_SD_CD, GPIO_FN_SD_CLK, GPIO_FN_SD_CMD, + GPIO_FN_SD_D3, GPIO_FN_SD_D2, GPIO_FN_SD_D1, GPIO_FN_SD_D0, + + /* PTD (mobule: INTC, SPI0, LBSC, CPG, ADC) */ + GPIO_FN_IRQ7, GPIO_FN_IRQ6, GPIO_FN_IRQ5, GPIO_FN_IRQ4, + GPIO_FN_IRQ3, GPIO_FN_IRQ2, GPIO_FN_IRQ1, GPIO_FN_IRQ0, + GPIO_FN_MD6, GPIO_FN_MD5, GPIO_FN_MD3, GPIO_FN_MD2, + GPIO_FN_MD1, GPIO_FN_MD0, GPIO_FN_ADTRG1, GPIO_FN_ADTRG0, + + /* PTE (mobule: EtherC) */ + GPIO_FN_ET0_CRS_DV, GPIO_FN_ET0_TXD1, + GPIO_FN_ET0_TXD0, GPIO_FN_ET0_TX_EN, + GPIO_FN_ET0_REF_CLK, GPIO_FN_ET0_RXD1, + GPIO_FN_ET0_RXD0, GPIO_FN_ET0_RX_ER, + + /* PTF (mobule: EtherC) */ + GPIO_FN_ET1_CRS_DV, GPIO_FN_ET1_TXD1, + GPIO_FN_ET1_TXD0, GPIO_FN_ET1_TX_EN, + GPIO_FN_ET1_REF_CLK, GPIO_FN_ET1_RXD1, + GPIO_FN_ET1_RXD0, GPIO_FN_ET1_RX_ER, + + /* PTG (mobule: SYSTEM, PWMX, LPC) */ + GPIO_FN_STATUS0, GPIO_FN_STATUS1, + GPIO_FN_PWX0, GPIO_FN_PWX1, GPIO_FN_PWX2, GPIO_FN_PWX3, + GPIO_FN_SERIRQ, GPIO_FN_CLKRUN, GPIO_FN_LPCPD, GPIO_FN_LDRQ, + + /* PTH (mobule: TMU, SCIF234, SPI1, SPI0) */ + GPIO_FN_TCLK, GPIO_FN_RXD4, GPIO_FN_TXD4, + GPIO_FN_SP1_MOSI, GPIO_FN_SP1_MISO, + GPIO_FN_SP1_SCK, GPIO_FN_SP1_SCK_FB, + GPIO_FN_SP1_SS0, GPIO_FN_SP1_SS1, + GPIO_FN_SP0_SS1, + + /* PTI (mobule: INTC) */ + GPIO_FN_IRQ15, GPIO_FN_IRQ14, GPIO_FN_IRQ13, GPIO_FN_IRQ12, + GPIO_FN_IRQ11, GPIO_FN_IRQ10, GPIO_FN_IRQ9, GPIO_FN_IRQ8, + + /* PTJ (mobule: SCIF234, SERMUX) */ + GPIO_FN_RXD3, GPIO_FN_TXD3, GPIO_FN_RXD2, GPIO_FN_TXD2, + GPIO_FN_COM1_TXD, GPIO_FN_COM1_RXD, + GPIO_FN_COM1_RTS, GPIO_FN_COM1_CTS, + + /* PTK (mobule: SERMUX) */ + GPIO_FN_COM2_TXD, GPIO_FN_COM2_RXD, + GPIO_FN_COM2_RTS, GPIO_FN_COM2_CTS, + GPIO_FN_COM2_DTR, GPIO_FN_COM2_DSR, + GPIO_FN_COM2_DCD, GPIO_FN_COM2_RI, + + /* PTL (mobule: SERMUX) */ + GPIO_FN_RAC_TXD, GPIO_FN_RAC_RXD, + GPIO_FN_RAC_RTS, GPIO_FN_RAC_CTS, + GPIO_FN_RAC_DTR, GPIO_FN_RAC_DSR, + 
GPIO_FN_RAC_DCD, GPIO_FN_RAC_RI, + + /* PTM (mobule: IIC, LPC) */ + GPIO_FN_SDA6, GPIO_FN_SCL6, GPIO_FN_SDA7, GPIO_FN_SCL7, + GPIO_FN_WP, GPIO_FN_FMS0, GPIO_FN_FMS1, + + /* PTN (mobule: SCIF234, EVC) */ + GPIO_FN_SCK2, GPIO_FN_RTS4, GPIO_FN_RTS3, GPIO_FN_RTS2, + GPIO_FN_CTS4, GPIO_FN_CTS3, GPIO_FN_CTS2, + GPIO_FN_EVENT7, GPIO_FN_EVENT6, GPIO_FN_EVENT5, GPIO_FN_EVENT4, + GPIO_FN_EVENT3, GPIO_FN_EVENT2, GPIO_FN_EVENT1, GPIO_FN_EVENT0, + + /* PTO (mobule: SGPIO) */ + GPIO_FN_SGPIO0_CLK, GPIO_FN_SGPIO0_LOAD, + GPIO_FN_SGPIO0_DI, GPIO_FN_SGPIO0_DO, + GPIO_FN_SGPIO1_CLK, GPIO_FN_SGPIO1_LOAD, + GPIO_FN_SGPIO1_DI, GPIO_FN_SGPIO1_DO, + + /* PTP (mobule: JMC, SCIF234) */ + GPIO_FN_JMCTCK, GPIO_FN_JMCTMS, GPIO_FN_JMCTDO, GPIO_FN_JMCTDI, + GPIO_FN_JMCRST, GPIO_FN_SCK4, GPIO_FN_SCK3, + + /* PTQ (mobule: LPC) */ + GPIO_FN_LAD3, GPIO_FN_LAD2, GPIO_FN_LAD1, GPIO_FN_LAD0, + GPIO_FN_LFRAME, GPIO_FN_LRESET, GPIO_FN_LCLK, + + /* PTR (mobule: GRA, IIC) */ + GPIO_FN_DDC3, GPIO_FN_DDC2, + GPIO_FN_SDA8, GPIO_FN_SCL8, GPIO_FN_SDA2, GPIO_FN_SCL2, + GPIO_FN_SDA1, GPIO_FN_SCL1, GPIO_FN_SDA0, GPIO_FN_SCL0, + + /* PTS (mobule: GRA, IIC) */ + GPIO_FN_DDC1, GPIO_FN_DDC0, + GPIO_FN_SDA9, GPIO_FN_SCL9, GPIO_FN_SDA5, GPIO_FN_SCL5, + GPIO_FN_SDA4, GPIO_FN_SCL4, GPIO_FN_SDA3, GPIO_FN_SCL3, + + /* PTT (mobule: SYSTEM, PWMX) */ + GPIO_FN_AUDSYNC, GPIO_FN_AUDCK, + GPIO_FN_AUDATA3, GPIO_FN_AUDATA2, + GPIO_FN_AUDATA1, GPIO_FN_AUDATA0, + GPIO_FN_PWX7, GPIO_FN_PWX6, GPIO_FN_PWX5, GPIO_FN_PWX4, + + /* PTU (mobule: LBSC, DMAC) */ + GPIO_FN_CS6, GPIO_FN_CS5, GPIO_FN_CS4, GPIO_FN_CS0, + GPIO_FN_RD, GPIO_FN_WE0, GPIO_FN_A25, GPIO_FN_A24, + GPIO_FN_DREQ0, GPIO_FN_DACK0, + + /* PTV (mobule: LBSC, DMAC) */ + GPIO_FN_A23, GPIO_FN_A22, GPIO_FN_A21, GPIO_FN_A20, + GPIO_FN_A19, GPIO_FN_A18, GPIO_FN_A17, GPIO_FN_A16, + GPIO_FN_TEND0, GPIO_FN_DREQ1, GPIO_FN_DACK1, GPIO_FN_TEND1, + + /* PTW (mobule: LBSC) */ + GPIO_FN_A15, GPIO_FN_A14, GPIO_FN_A13, GPIO_FN_A12, + GPIO_FN_A11, GPIO_FN_A10, GPIO_FN_A9, GPIO_FN_A8, + + /* PTX (mobule: LBSC) */ + GPIO_FN_A7, GPIO_FN_A6, GPIO_FN_A5, GPIO_FN_A4, + GPIO_FN_A3, GPIO_FN_A2, GPIO_FN_A1, GPIO_FN_A0, + + /* PTY (mobule: LBSC) */ + GPIO_FN_D7, GPIO_FN_D6, GPIO_FN_D5, GPIO_FN_D4, + GPIO_FN_D3, GPIO_FN_D2, GPIO_FN_D1, GPIO_FN_D0, +}; + +#endif /* __ASM_SH7757_H__ */ diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c index 6c78d0a9c857..10e6795b56aa 100644 --- a/arch/sh/kernel/cpu/sh4/probe.c +++ b/arch/sh/kernel/cpu/sh4/probe.c @@ -139,8 +139,15 @@ int __init detect_cpu_and_cache_system(void) } break; case 0x300b: - boot_cpu_data.type = CPU_SH7724; - boot_cpu_data.flags |= CPU_HAS_L2_CACHE; + switch (prr) { + case 0x20: + boot_cpu_data.type = CPU_SH7724; + boot_cpu_data.flags |= CPU_HAS_L2_CACHE; + break; + case 0x50: + boot_cpu_data.type = CPU_SH7757; + break; + } break; case 0x4000: /* 1st cut */ case 0x4001: /* 2nd cut */ diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile index 12cddf4c721d..490d5dc9e372 100644 --- a/arch/sh/kernel/cpu/sh4a/Makefile +++ b/arch/sh/kernel/cpu/sh4a/Makefile @@ -3,6 +3,7 @@ # # CPU subtype setup +obj-$(CONFIG_CPU_SUBTYPE_SH7757) += setup-sh7757.o obj-$(CONFIG_CPU_SUBTYPE_SH7763) += setup-sh7763.o obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o @@ -19,6 +20,7 @@ obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o smp-$(CONFIG_CPU_SHX3) := smp-shx3.o # Primary on-chip clocks (common) +clock-$(CONFIG_CPU_SUBTYPE_SH7757) := clock-sh7757.o clock-$(CONFIG_CPU_SUBTYPE_SH7763) :=
clock-sh7763.o clock-$(CONFIG_CPU_SUBTYPE_SH7770) := clock-sh7770.o clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o @@ -35,6 +37,7 @@ clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7722) := pinmux-sh7722.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7723) := pinmux-sh7723.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7724) := pinmux-sh7724.o +pinmux-$(CONFIG_CPU_SUBTYPE_SH7757) := pinmux-sh7757.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7785) := pinmux-sh7785.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c new file mode 100644 index 000000000000..ddc235ca9664 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c @@ -0,0 +1,130 @@ +/* + * arch/sh/kernel/cpu/sh4/clock-sh7757.c + * + * SH7757 support for the clock framework + * + * Copyright (C) 2009 Renesas Solutions Corp. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include + +static int ifc_divisors[] = { 2, 1, 4, 1, 1, 8, 1, 1, + 16, 1, 1, 32, 1, 1, 1, 1 }; +static int sfc_divisors[] = { 2, 1, 4, 1, 1, 8, 1, 1, + 16, 1, 1, 32, 1, 1, 1, 1 }; +static int bfc_divisors[] = { 2, 1, 4, 1, 1, 8, 1, 1, + 16, 1, 1, 32, 1, 1, 1, 1 }; +static int p1fc_divisors[] = { 2, 1, 4, 1, 1, 8, 1, 1, + 16, 1, 1, 32, 1, 1, 1, 1 }; + +static void master_clk_init(struct clk *clk) +{ + clk->rate = CONFIG_SH_PCLK_FREQ * 16; +} + +static struct clk_ops sh7757_master_clk_ops = { + .init = master_clk_init, +}; + +static void module_clk_recalc(struct clk *clk) +{ + int idx = ctrl_inl(FRQCR) & 0x0000000f; + clk->rate = clk->parent->rate / p1fc_divisors[idx]; +} + +static struct clk_ops sh7757_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static void bus_clk_recalc(struct clk *clk) +{ + int idx = (ctrl_inl(FRQCR) >> 8) & 0x0000000f; + clk->rate = clk->parent->rate / bfc_divisors[idx]; +} + +static struct clk_ops sh7757_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static void cpu_clk_recalc(struct clk *clk) +{ + int idx = (ctrl_inl(FRQCR) >> 20) & 0x0000000f; + clk->rate = clk->parent->rate / ifc_divisors[idx]; +} + +static struct clk_ops sh7757_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct clk_ops *sh7757_clk_ops[] = { + &sh7757_master_clk_ops, + &sh7757_module_clk_ops, + &sh7757_bus_clk_ops, + &sh7757_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct clk_ops **ops, int idx) +{ + if (idx < ARRAY_SIZE(sh7757_clk_ops)) + *ops = sh7757_clk_ops[idx]; +} + +static void shyway_clk_recalc(struct clk *clk) +{ + int idx = (ctrl_inl(FRQCR) >> 12) & 0x0000000f; + clk->rate = clk->parent->rate / sfc_divisors[idx]; +} + +static struct clk_ops sh7757_shyway_clk_ops = { + .recalc = shyway_clk_recalc, +}; + +static struct clk sh7757_shyway_clk = { + .name = "shyway_clk", + .flags = CLK_ENABLE_ON_INIT, + .ops = &sh7757_shyway_clk_ops, +}; + +/* + * Additional sh7757-specific on-chip clocks that aren't already part of the + * clock framework + */ +static struct clk *sh7757_onchip_clocks[] = { + &sh7757_shyway_clk, +}; + +static int __init sh7757_clk_init(void) +{ + struct clk *clk = clk_get(NULL, "master_clk"); + int i; + + for (i = 0; i < ARRAY_SIZE(sh7757_onchip_clocks); i++) { + struct clk *clkp = sh7757_onchip_clocks[i]; + + clkp->parent = clk; + clk_register(clkp); + clk_enable(clkp); + } + + /* + * Now that we have the rest of the clocks registered, we need to 
+ * force the parent clock to propagate so that these clocks will + * automatically figure out their rate. We cheat by handing the + * parent clock its current rate and forcing child propagation. + */ + clk_set_rate(clk, clk_get_rate(clk)); + + clk_put(clk); + + return 0; +} + +arch_initcall(sh7757_clk_init); + diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c new file mode 100644 index 000000000000..ed23b155c097 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c @@ -0,0 +1,2019 @@ +/* + * SH7757 (A0 step) Pinmux + * + * Copyright (C) 2009 Renesas Solutions Corp. + * + * Author : Yoshihiro Shimoda + * + * Based on SH7757 Pinmux + * Copyright (C) 2008 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include +#include +#include +#include + +enum { + PINMUX_RESERVED = 0, + + PINMUX_DATA_BEGIN, + PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA, + PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA, + PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA, + PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA, + PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA, + PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA, + PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA, + PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA, + PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, + PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA, + PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA, + PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA, + PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA, + PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA, + PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA, + PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA, + PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA, + PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA, + PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA, + PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA, + PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA, + PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA, + PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, + PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA, + PTM6_DATA, PTM5_DATA, PTM4_DATA, + PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA, + PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA, + PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA, + PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA, + PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA, + PTP6_DATA, PTP5_DATA, PTP4_DATA, + PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA, + PTQ6_DATA, PTQ5_DATA, PTQ4_DATA, + PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA, + PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA, + PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA, + PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA, + PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA, + PTT5_DATA, PTT4_DATA, + PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA, + PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA, + PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA, + PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA, + PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA, + PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA, + PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA, + PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA, + PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA, + PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA, + PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA, + PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA, + PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA, + PINMUX_DATA_END, + + PINMUX_INPUT_BEGIN, + PTA7_IN, PTA6_IN, PTA5_IN, PTA4_IN, + PTA3_IN, PTA2_IN, PTA1_IN, PTA0_IN, + PTB7_IN, PTB6_IN, PTB5_IN, PTB4_IN, + PTB3_IN, PTB2_IN, PTB1_IN, PTB0_IN, + PTC7_IN, 
PTC6_IN, PTC5_IN, PTC4_IN, + PTC3_IN, PTC2_IN, PTC1_IN, PTC0_IN, + PTD7_IN, PTD6_IN, PTD5_IN, PTD4_IN, + PTD3_IN, PTD2_IN, PTD1_IN, PTD0_IN, + PTE7_IN, PTE6_IN, PTE5_IN, PTE4_IN, + PTE3_IN, PTE2_IN, PTE1_IN, PTE0_IN, + PTF7_IN, PTF6_IN, PTF5_IN, PTF4_IN, + PTF3_IN, PTF2_IN, PTF1_IN, PTF0_IN, + PTG7_IN, PTG6_IN, PTG5_IN, PTG4_IN, + PTG3_IN, PTG2_IN, PTG1_IN, PTG0_IN, + PTH7_IN, PTH6_IN, PTH5_IN, PTH4_IN, + PTH3_IN, PTH2_IN, PTH1_IN, PTH0_IN, + PTI7_IN, PTI6_IN, PTI5_IN, PTI4_IN, + PTI3_IN, PTI2_IN, PTI1_IN, PTI0_IN, + PTJ7_IN, PTJ6_IN, PTJ5_IN, PTJ4_IN, + PTJ3_IN, PTJ2_IN, PTJ1_IN, PTJ0_IN, + PTK7_IN, PTK6_IN, PTK5_IN, PTK4_IN, + PTK3_IN, PTK2_IN, PTK1_IN, PTK0_IN, + PTL7_IN, PTL6_IN, PTL5_IN, PTL4_IN, + PTL3_IN, PTL2_IN, PTL1_IN, PTL0_IN, + PTM6_IN, PTM5_IN, PTM4_IN, + PTM3_IN, PTM2_IN, PTM1_IN, PTM0_IN, + PTN7_IN, PTN6_IN, PTN5_IN, PTN4_IN, + PTN3_IN, PTN2_IN, PTN1_IN, PTN0_IN, + PTO7_IN, PTO6_IN, PTO5_IN, PTO4_IN, + PTO3_IN, PTO2_IN, PTO1_IN, PTO0_IN, + PTP6_IN, PTP5_IN, PTP4_IN, + PTP3_IN, PTP2_IN, PTP1_IN, PTP0_IN, + PTQ6_IN, PTQ5_IN, PTQ4_IN, + PTQ3_IN, PTQ2_IN, PTQ1_IN, PTQ0_IN, + PTR7_IN, PTR6_IN, PTR5_IN, PTR4_IN, + PTR3_IN, PTR2_IN, PTR1_IN, PTR0_IN, + PTS7_IN, PTS6_IN, PTS5_IN, PTS4_IN, + PTS3_IN, PTS2_IN, PTS1_IN, PTS0_IN, + PTT5_IN, PTT4_IN, + PTT3_IN, PTT2_IN, PTT1_IN, PTT0_IN, + PTU7_IN, PTU6_IN, PTU5_IN, PTU4_IN, + PTU3_IN, PTU2_IN, PTU1_IN, PTU0_IN, + PTV7_IN, PTV6_IN, PTV5_IN, PTV4_IN, + PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN, + PTW7_IN, PTW6_IN, PTW5_IN, PTW4_IN, + PTW3_IN, PTW2_IN, PTW1_IN, PTW0_IN, + PTX7_IN, PTX6_IN, PTX5_IN, PTX4_IN, + PTX3_IN, PTX2_IN, PTX1_IN, PTX0_IN, + PTY7_IN, PTY6_IN, PTY5_IN, PTY4_IN, + PTY3_IN, PTY2_IN, PTY1_IN, PTY0_IN, + PTZ7_IN, PTZ6_IN, PTZ5_IN, PTZ4_IN, + PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN, + PINMUX_INPUT_END, + + PINMUX_INPUT_PULLUP_BEGIN, + PTU7_IN_PU, PTU6_IN_PU, PTU5_IN_PU, PTU4_IN_PU, + PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU, + PTV7_IN_PU, PTV6_IN_PU, PTV5_IN_PU, PTV4_IN_PU, + PTV3_IN_PU, PTV2_IN_PU, PTV1_IN_PU, PTV0_IN_PU, + PTW7_IN_PU, PTW6_IN_PU, PTW5_IN_PU, PTW4_IN_PU, + PTW3_IN_PU, PTW2_IN_PU, PTW1_IN_PU, PTW0_IN_PU, + PTX7_IN_PU, PTX6_IN_PU, PTX5_IN_PU, PTX4_IN_PU, + PTX3_IN_PU, PTX2_IN_PU, PTX1_IN_PU, PTX0_IN_PU, + PTY7_IN_PU, PTY6_IN_PU, PTY5_IN_PU, PTY4_IN_PU, + PTY3_IN_PU, PTY2_IN_PU, PTY1_IN_PU, PTY0_IN_PU, + PINMUX_INPUT_PULLUP_END, + + PINMUX_OUTPUT_BEGIN, + PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT, + PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT, + PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT, + PTB3_OUT, PTB2_OUT, PTB1_OUT, PTB0_OUT, + PTC7_OUT, PTC6_OUT, PTC5_OUT, PTC4_OUT, + PTC3_OUT, PTC2_OUT, PTC1_OUT, PTC0_OUT, + PTD7_OUT, PTD6_OUT, PTD5_OUT, PTD4_OUT, + PTD3_OUT, PTD2_OUT, PTD1_OUT, PTD0_OUT, + PTE7_OUT, PTE6_OUT, PTE5_OUT, PTE4_OUT, + PTE3_OUT, PTE2_OUT, PTE1_OUT, PTE0_OUT, + PTF7_OUT, PTF6_OUT, PTF5_OUT, PTF4_OUT, + PTF3_OUT, PTF2_OUT, PTF1_OUT, PTF0_OUT, + PTG7_OUT, PTG6_OUT, PTG5_OUT, PTG4_OUT, + PTG3_OUT, PTG2_OUT, PTG1_OUT, PTG0_OUT, + PTH7_OUT, PTH6_OUT, PTH5_OUT, PTH4_OUT, + PTH3_OUT, PTH2_OUT, PTH1_OUT, PTH0_OUT, + PTI7_OUT, PTI6_OUT, PTI5_OUT, PTI4_OUT, + PTI3_OUT, PTI2_OUT, PTI1_OUT, PTI0_OUT, + PTJ7_OUT, PTJ6_OUT, PTJ5_OUT, PTJ4_OUT, + PTJ3_OUT, PTJ2_OUT, PTJ1_OUT, PTJ0_OUT, + PTK7_OUT, PTK6_OUT, PTK5_OUT, PTK4_OUT, + PTK3_OUT, PTK2_OUT, PTK1_OUT, PTK0_OUT, + PTL7_OUT, PTL6_OUT, PTL5_OUT, PTL4_OUT, + PTL3_OUT, PTL2_OUT, PTL1_OUT, PTL0_OUT, + PTM6_OUT, PTM5_OUT, PTM4_OUT, + PTM3_OUT, PTM2_OUT, PTM1_OUT, PTM0_OUT, + PTN7_OUT, PTN6_OUT, PTN5_OUT, PTN4_OUT, + PTN3_OUT, PTN2_OUT, PTN1_OUT, PTN0_OUT, + PTO7_OUT, 
PTO6_OUT, PTO5_OUT, PTO4_OUT, + PTO3_OUT, PTO2_OUT, PTO1_OUT, PTO0_OUT, + PTP6_OUT, PTP5_OUT, PTP4_OUT, + PTP3_OUT, PTP2_OUT, PTP1_OUT, PTP0_OUT, + PTQ6_OUT, PTQ5_OUT, PTQ4_OUT, + PTQ3_OUT, PTQ2_OUT, PTQ1_OUT, PTQ0_OUT, + PTR7_OUT, PTR6_OUT, PTR5_OUT, PTR4_OUT, + PTR3_OUT, PTR2_OUT, PTR1_OUT, PTR0_OUT, + PTS7_OUT, PTS6_OUT, PTS5_OUT, PTS4_OUT, + PTS3_OUT, PTS2_OUT, PTS1_OUT, PTS0_OUT, + PTT5_OUT, PTT4_OUT, + PTT3_OUT, PTT2_OUT, PTT1_OUT, PTT0_OUT, + PTU7_OUT, PTU6_OUT, PTU5_OUT, PTU4_OUT, + PTU3_OUT, PTU2_OUT, PTU1_OUT, PTU0_OUT, + PTV7_OUT, PTV6_OUT, PTV5_OUT, PTV4_OUT, + PTV3_OUT, PTV2_OUT, PTV1_OUT, PTV0_OUT, + PTW7_OUT, PTW6_OUT, PTW5_OUT, PTW4_OUT, + PTW3_OUT, PTW2_OUT, PTW1_OUT, PTW0_OUT, + PTX7_OUT, PTX6_OUT, PTX5_OUT, PTX4_OUT, + PTX3_OUT, PTX2_OUT, PTX1_OUT, PTX0_OUT, + PTY7_OUT, PTY6_OUT, PTY5_OUT, PTY4_OUT, + PTY3_OUT, PTY2_OUT, PTY1_OUT, PTY0_OUT, + PTZ7_OUT, PTZ6_OUT, PTZ5_OUT, PTZ4_OUT, + PTZ3_OUT, PTZ2_OUT, PTZ1_OUT, PTZ0_OUT, + PINMUX_OUTPUT_END, + + PINMUX_FUNCTION_BEGIN, + PTA7_FN, PTA6_FN, PTA5_FN, PTA4_FN, + PTA3_FN, PTA2_FN, PTA1_FN, PTA0_FN, + PTB7_FN, PTB6_FN, PTB5_FN, PTB4_FN, + PTB3_FN, PTB2_FN, PTB1_FN, PTB0_FN, + PTC7_FN, PTC6_FN, PTC5_FN, PTC4_FN, + PTC3_FN, PTC2_FN, PTC1_FN, PTC0_FN, + PTD7_FN, PTD6_FN, PTD5_FN, PTD4_FN, + PTD3_FN, PTD2_FN, PTD1_FN, PTD0_FN, + PTE7_FN, PTE6_FN, PTE5_FN, PTE4_FN, + PTE3_FN, PTE2_FN, PTE1_FN, PTE0_FN, + PTF7_FN, PTF6_FN, PTF5_FN, PTF4_FN, + PTF3_FN, PTF2_FN, PTF1_FN, PTF0_FN, + PTG7_FN, PTG6_FN, PTG5_FN, PTG4_FN, + PTG3_FN, PTG2_FN, PTG1_FN, PTG0_FN, + PTH7_FN, PTH6_FN, PTH5_FN, PTH4_FN, + PTH3_FN, PTH2_FN, PTH1_FN, PTH0_FN, + PTI7_FN, PTI6_FN, PTI5_FN, PTI4_FN, + PTI3_FN, PTI2_FN, PTI1_FN, PTI0_FN, + PTJ7_FN, PTJ6_FN, PTJ5_FN, PTJ4_FN, + PTJ3_FN, PTJ2_FN, PTJ1_FN, PTJ0_FN, + PTK7_FN, PTK6_FN, PTK5_FN, PTK4_FN, + PTK3_FN, PTK2_FN, PTK1_FN, PTK0_FN, + PTL7_FN, PTL6_FN, PTL5_FN, PTL4_FN, + PTL3_FN, PTL2_FN, PTL1_FN, PTL0_FN, + PTM6_FN, PTM5_FN, PTM4_FN, + PTM3_FN, PTM2_FN, PTM1_FN, PTM0_FN, + PTN7_FN, PTN6_FN, PTN5_FN, PTN4_FN, + PTN3_FN, PTN2_FN, PTN1_FN, PTN0_FN, + PTO7_FN, PTO6_FN, PTO5_FN, PTO4_FN, + PTO3_FN, PTO2_FN, PTO1_FN, PTO0_FN, + PTP6_FN, PTP5_FN, PTP4_FN, + PTP3_FN, PTP2_FN, PTP1_FN, PTP0_FN, + PTQ6_FN, PTQ5_FN, PTQ4_FN, + PTQ3_FN, PTQ2_FN, PTQ1_FN, PTQ0_FN, + PTR7_FN, PTR6_FN, PTR5_FN, PTR4_FN, + PTR3_FN, PTR2_FN, PTR1_FN, PTR0_FN, + PTS7_FN, PTS6_FN, PTS5_FN, PTS4_FN, + PTS3_FN, PTS2_FN, PTS1_FN, PTS0_FN, + PTT5_FN, PTT4_FN, + PTT3_FN, PTT2_FN, PTT1_FN, PTT0_FN, + PTU7_FN, PTU6_FN, PTU5_FN, PTU4_FN, + PTU3_FN, PTU2_FN, PTU1_FN, PTU0_FN, + PTV7_FN, PTV6_FN, PTV5_FN, PTV4_FN, + PTV3_FN, PTV2_FN, PTV1_FN, PTV0_FN, + PTW7_FN, PTW6_FN, PTW5_FN, PTW4_FN, + PTW3_FN, PTW2_FN, PTW1_FN, PTW0_FN, + PTX7_FN, PTX6_FN, PTX5_FN, PTX4_FN, + PTX3_FN, PTX2_FN, PTX1_FN, PTX0_FN, + PTY7_FN, PTY6_FN, PTY5_FN, PTY4_FN, + PTY3_FN, PTY2_FN, PTY1_FN, PTY0_FN, + PTZ7_FN, PTZ6_FN, PTZ5_FN, PTZ4_FN, + PTZ3_FN, PTZ2_FN, PTZ1_FN, PTZ0_FN, + + PS0_15_FN1, PS0_15_FN3, + PS0_14_FN1, PS0_14_FN3, + PS0_13_FN1, PS0_13_FN3, + PS0_12_FN1, PS0_12_FN3, + PS0_7_FN1, PS0_7_FN2, + PS0_6_FN1, PS0_6_FN2, + PS0_5_FN1, PS0_5_FN2, + PS0_4_FN1, PS0_4_FN2, + PS0_3_FN1, PS0_3_FN2, + PS0_2_FN1, PS0_2_FN2, + PS0_1_FN1, PS0_1_FN2, + + PS1_7_FN1, PS1_7_FN3, + PS1_6_FN1, PS1_6_FN3, + + PS2_13_FN1, PS2_13_FN3, + PS2_12_FN1, PS2_12_FN3, + PS2_1_FN1, PS2_1_FN2, + PS2_0_FN1, PS2_0_FN2, + + PS4_15_FN1, PS4_15_FN2, + PS4_14_FN1, PS4_14_FN2, + PS4_13_FN1, PS4_13_FN2, + PS4_12_FN1, PS4_12_FN2, + PS4_11_FN1, PS4_11_FN2, + PS4_10_FN1, PS4_10_FN2, + PS4_9_FN1, PS4_9_FN2, + 
PS4_3_FN1, PS4_3_FN2, + PS4_2_FN1, PS4_2_FN2, + PS4_1_FN1, PS4_1_FN2, + PS4_0_FN1, PS4_0_FN2, + + PS5_9_FN1, PS5_9_FN2, + PS5_8_FN1, PS5_8_FN2, + PS5_7_FN1, PS5_7_FN2, + PS5_6_FN1, PS5_6_FN2, + PS5_5_FN1, PS5_5_FN2, + PS5_4_FN1, PS5_4_FN2, + + /* AN15 to 8 : EVENT15 to 8 */ + PS6_7_FN_AN, PS6_7_FN_EV, + PS6_6_FN_AN, PS6_6_FN_EV, + PS6_5_FN_AN, PS6_5_FN_EV, + PS6_4_FN_AN, PS6_4_FN_EV, + PS6_3_FN_AN, PS6_3_FN_EV, + PS6_2_FN_AN, PS6_2_FN_EV, + PS6_1_FN_AN, PS6_1_FN_EV, + PS6_0_FN_AN, PS6_0_FN_EV, + + PINMUX_FUNCTION_END, + + PINMUX_MARK_BEGIN, + /* PTA (mobule: LBSC, CPG, LPC) */ + BS_MARK, RDWR_MARK, WE1_MARK, RDY_MARK, + MD10_MARK, MD9_MARK, MD8_MARK, + LGPIO7_MARK, LGPIO6_MARK, LGPIO5_MARK, LGPIO4_MARK, + LGPIO3_MARK, LGPIO2_MARK, LGPIO1_MARK, LGPIO0_MARK, + + /* PTB (mobule: LBSC, EtherC, SIM, LPC) */ + D15_MARK, D14_MARK, D13_MARK, D12_MARK, + D11_MARK, D10_MARK, D9_MARK, D8_MARK, + ET0_MDC_MARK, ET0_MDIO_MARK, ET1_MDC_MARK, ET1_MDIO_MARK, + SIM_D_MARK, SIM_CLK_MARK, SIM_RST_MARK, + WPSZ1_MARK, WPSZ0_MARK, FWID_MARK, FLSHSZ_MARK, + LPC_SPIEN_MARK, BASEL_MARK, + + /* PTC (mobule: SD) */ + SD_WP_MARK, SD_CD_MARK, SD_CLK_MARK, SD_CMD_MARK, + SD_D3_MARK, SD_D2_MARK, SD_D1_MARK, SD_D0_MARK, + + /* PTD (mobule: INTC, SPI0, LBSC, CPG, ADC) */ + IRQ7_MARK, IRQ6_MARK, IRQ5_MARK, IRQ4_MARK, + IRQ3_MARK, IRQ2_MARK, IRQ1_MARK, IRQ0_MARK, + MD6_MARK, MD5_MARK, MD3_MARK, MD2_MARK, + MD1_MARK, MD0_MARK, ADTRG1_MARK, ADTRG0_MARK, + + /* PTE (mobule: EtherC) */ + ET0_CRS_DV_MARK, ET0_TXD1_MARK, + ET0_TXD0_MARK, ET0_TX_EN_MARK, + ET0_REF_CLK_MARK, ET0_RXD1_MARK, + ET0_RXD0_MARK, ET0_RX_ER_MARK, + + /* PTF (mobule: EtherC) */ + ET1_CRS_DV_MARK, ET1_TXD1_MARK, + ET1_TXD0_MARK, ET1_TX_EN_MARK, + ET1_REF_CLK_MARK, ET1_RXD1_MARK, + ET1_RXD0_MARK, ET1_RX_ER_MARK, + + /* PTG (mobule: SYSTEM, PWMX, LPC) */ + STATUS0_MARK, STATUS1_MARK, + PWX0_MARK, PWX1_MARK, PWX2_MARK, PWX3_MARK, + SERIRQ_MARK, CLKRUN_MARK, LPCPD_MARK, LDRQ_MARK, + + /* PTH (mobule: TMU, SCIF234, SPI1, SPI0) */ + TCLK_MARK, RXD4_MARK, TXD4_MARK, + SP1_MOSI_MARK, SP1_MISO_MARK, SP1_SCK_MARK, SP1_SCK_FB_MARK, + SP1_SS0_MARK, SP1_SS1_MARK, SP0_SS1_MARK, + + /* PTI (mobule: INTC) */ + IRQ15_MARK, IRQ14_MARK, IRQ13_MARK, IRQ12_MARK, + IRQ11_MARK, IRQ10_MARK, IRQ9_MARK, IRQ8_MARK, + + /* PTJ (mobule: SCIF234, SERMUX) */ + RXD3_MARK, TXD3_MARK, RXD2_MARK, TXD2_MARK, + COM1_TXD_MARK, COM1_RXD_MARK, COM1_RTS_MARK, COM1_CTS_MARK, + + /* PTK (mobule: SERMUX) */ + COM2_TXD_MARK, COM2_RXD_MARK, COM2_RTS_MARK, COM2_CTS_MARK, + COM2_DTR_MARK, COM2_DSR_MARK, COM2_DCD_MARK, COM2_RI_MARK, + + /* PTL (mobule: SERMUX) */ + RAC_TXD_MARK, RAC_RXD_MARK, RAC_RTS_MARK, RAC_CTS_MARK, + RAC_DTR_MARK, RAC_DSR_MARK, RAC_DCD_MARK, RAC_RI_MARK, + + /* PTM (mobule: IIC, LPC) */ + SDA6_MARK, SCL6_MARK, SDA7_MARK, SCL7_MARK, + WP_MARK, FMS0_MARK, FMS1_MARK, + + /* PTN (mobule: SCIF234, EVC) */ + SCK2_MARK, RTS4_MARK, RTS3_MARK, RTS2_MARK, + CTS4_MARK, CTS3_MARK, CTS2_MARK, + EVENT7_MARK, EVENT6_MARK, EVENT5_MARK, EVENT4_MARK, + EVENT3_MARK, EVENT2_MARK, EVENT1_MARK, EVENT0_MARK, + + /* PTO (mobule: SGPIO) */ + SGPIO0_CLK_MARK, SGPIO0_LOAD_MARK, + SGPIO0_DI_MARK, SGPIO0_DO_MARK, + SGPIO1_CLK_MARK, SGPIO1_LOAD_MARK, + SGPIO1_DI_MARK, SGPIO1_DO_MARK, + + /* PTP (mobule: JMC, SCIF234) */ + JMCTCK_MARK, JMCTMS_MARK, JMCTDO_MARK, JMCTDI_MARK, + JMCRST_MARK, SCK4_MARK, SCK3_MARK, + + /* PTQ (mobule: LPC) */ + LAD3_MARK, LAD2_MARK, LAD1_MARK, LAD0_MARK, + LFRAME_MARK, LRESET_MARK, LCLK_MARK, + + /* PTR (mobule: GRA, IIC) */ + DDC3_MARK, DDC2_MARK, + SDA8_MARK, SCL8_MARK, SDA2_MARK, 
SCL2_MARK, + SDA1_MARK, SCL1_MARK, SDA0_MARK, SCL0_MARK, + + /* PTS (mobule: GRA, IIC) */ + DDC1_MARK, DDC0_MARK, + SDA9_MARK, SCL9_MARK, SDA5_MARK, SCL5_MARK, + SDA4_MARK, SCL4_MARK, SDA3_MARK, SCL3_MARK, + + /* PTT (mobule: SYSTEM, PWMX) */ + AUDSYNC_MARK, AUDCK_MARK, + AUDATA3_MARK, AUDATA2_MARK, + AUDATA1_MARK, AUDATA0_MARK, + PWX7_MARK, PWX6_MARK, PWX5_MARK, PWX4_MARK, + + /* PTU (mobule: LBSC, DMAC) */ + CS6_MARK, CS5_MARK, CS4_MARK, CS0_MARK, + RD_MARK, WE0_MARK, A25_MARK, A24_MARK, + DREQ0_MARK, DACK0_MARK, + + /* PTV (mobule: LBSC, DMAC) */ + A23_MARK, A22_MARK, A21_MARK, A20_MARK, + A19_MARK, A18_MARK, A17_MARK, A16_MARK, + TEND0_MARK, DREQ1_MARK, DACK1_MARK, TEND1_MARK, + + /* PTW (mobule: LBSC) */ + A15_MARK, A14_MARK, A13_MARK, A12_MARK, + A11_MARK, A10_MARK, A9_MARK, A8_MARK, + + /* PTX (mobule: LBSC) */ + A7_MARK, A6_MARK, A5_MARK, A4_MARK, + A3_MARK, A2_MARK, A1_MARK, A0_MARK, + + /* PTY (mobule: LBSC) */ + D7_MARK, D6_MARK, D5_MARK, D4_MARK, + D3_MARK, D2_MARK, D1_MARK, D0_MARK, + PINMUX_MARK_END, +}; + +static pinmux_enum_t pinmux_data[] = { + /* PTA GPIO */ + PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT), + PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT), + PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT), + PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT), + PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT), + PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT), + PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT), + PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT), + + /* PTB GPIO */ + PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT), + PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT), + PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT), + PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT), + PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT), + PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT), + PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT), + PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT), + + /* PTC GPIO */ + PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT), + PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT), + PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT), + PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT), + PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT), + PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT), + PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT), + PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT), + + /* PTD GPIO */ + PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT), + PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT), + PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT), + PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT), + PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT), + PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT), + PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT), + PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT), + + /* PTE GPIO */ + PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT), + PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT), + PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT), + PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT), + PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT), + PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT), + + /* PTF GPIO */ + PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT), + PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT), + PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT), + PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT), + PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT), + PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT), + PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT), + PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT), + + /* PTG GPIO */ + PINMUX_DATA(PTG7_DATA, PTG7_IN, PTG7_OUT), + PINMUX_DATA(PTG6_DATA, PTG6_IN, PTG6_OUT), + PINMUX_DATA(PTG5_DATA, PTG5_IN, PTG5_OUT), + PINMUX_DATA(PTG4_DATA, PTG4_IN, PTG4_OUT), + PINMUX_DATA(PTG3_DATA, PTG3_IN, PTG3_OUT), + PINMUX_DATA(PTG2_DATA, PTG2_IN, PTG2_OUT), + 
PINMUX_DATA(PTG1_DATA, PTG1_IN, PTG1_OUT), + PINMUX_DATA(PTG0_DATA, PTG0_IN, PTG0_OUT), + + /* PTH GPIO */ + PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT), + PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT), + PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT), + PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT), + PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT), + PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT), + PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT), + PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT), + + /* PTI GPIO */ + PINMUX_DATA(PTI7_DATA, PTI7_IN, PTI7_OUT), + PINMUX_DATA(PTI6_DATA, PTI6_IN, PTI6_OUT), + PINMUX_DATA(PTI5_DATA, PTI5_IN, PTI5_OUT), + PINMUX_DATA(PTI4_DATA, PTI4_IN, PTI4_OUT), + PINMUX_DATA(PTI3_DATA, PTI3_IN, PTI3_OUT), + PINMUX_DATA(PTI2_DATA, PTI2_IN, PTI2_OUT), + PINMUX_DATA(PTI1_DATA, PTI1_IN, PTI1_OUT), + PINMUX_DATA(PTI0_DATA, PTI0_IN, PTI0_OUT), + + /* PTJ GPIO */ + PINMUX_DATA(PTJ7_DATA, PTJ7_IN, PTJ7_OUT), + PINMUX_DATA(PTJ6_DATA, PTJ6_IN, PTJ6_OUT), + PINMUX_DATA(PTJ5_DATA, PTJ5_IN, PTJ5_OUT), + PINMUX_DATA(PTJ4_DATA, PTJ4_IN, PTJ4_OUT), + PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT), + PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT), + PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT), + PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT), + + /* PTK GPIO */ + PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT), + PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT), + PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT), + PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT), + PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT), + PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT), + PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT), + PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT), + + /* PTL GPIO */ + PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT), + PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT), + PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT), + PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT), + PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT), + PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT), + PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT), + PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT), + + /* PTM GPIO */ + PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT), + PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT), + PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT), + PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT), + PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT), + PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT), + PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT), + + /* PTN GPIO */ + PINMUX_DATA(PTN7_DATA, PTN7_IN, PTN7_OUT), + PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT), + PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT), + PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT), + PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT), + PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT), + PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT), + PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT), + + /* PTO GPIO */ + PINMUX_DATA(PTO7_DATA, PTO7_IN, PTO7_OUT), + PINMUX_DATA(PTO6_DATA, PTO6_IN, PTO6_OUT), + PINMUX_DATA(PTO5_DATA, PTO5_IN, PTO5_OUT), + PINMUX_DATA(PTO4_DATA, PTO4_IN, PTO4_OUT), + PINMUX_DATA(PTO3_DATA, PTO3_IN, PTO3_OUT), + PINMUX_DATA(PTO2_DATA, PTO2_IN, PTO2_OUT), + PINMUX_DATA(PTO1_DATA, PTO1_IN, PTO1_OUT), + PINMUX_DATA(PTO0_DATA, PTO0_IN, PTO0_OUT), + + /* PTQ GPIO */ + PINMUX_DATA(PTQ6_DATA, PTQ6_IN, PTQ6_OUT), + PINMUX_DATA(PTQ5_DATA, PTQ5_IN, PTQ5_OUT), + PINMUX_DATA(PTQ4_DATA, PTQ4_IN, PTQ4_OUT), + PINMUX_DATA(PTQ3_DATA, PTQ3_IN, PTQ3_OUT), + PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_OUT), + PINMUX_DATA(PTQ1_DATA, PTQ1_IN, PTQ1_OUT), + PINMUX_DATA(PTQ0_DATA, PTQ0_IN, PTQ0_OUT), + + /* PTR GPIO */ + PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT), + PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT), + PINMUX_DATA(PTR5_DATA, PTR5_IN, 
PTR5_OUT), + PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT), + PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_OUT), + PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_OUT), + PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT), + PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT), + + /* PTS GPIO */ + PINMUX_DATA(PTS7_DATA, PTS7_IN, PTS7_OUT), + PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT), + PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT), + PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT), + PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT), + PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT), + PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT), + PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT), + + /* PTT GPIO */ + PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT), + PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT), + PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT), + PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT), + PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT), + PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT), + + /* PTU GPIO */ + PINMUX_DATA(PTU7_DATA, PTU7_IN, PTU7_OUT), + PINMUX_DATA(PTU6_DATA, PTU6_IN, PTU6_OUT), + PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT), + PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT), + PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT), + PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT), + PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT), + PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT), + + /* PTV GPIO */ + PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT), + PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT), + PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT), + PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT), + PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT), + PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT), + PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT), + PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT), + + /* PTW GPIO */ + PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT), + PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT), + PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT), + PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT), + PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT), + PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT), + PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT), + PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT), + + /* PTX GPIO */ + PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT), + PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT), + PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT), + PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT), + PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT), + PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT), + PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT), + PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT), + + /* PTY GPIO */ + PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT), + PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT), + PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT), + PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT), + PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT), + PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT), + PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT), + PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT), + + /* PTZ GPIO */ + PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT), + PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT), + PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT), + PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT), + PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT), + PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT), + PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT), + PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT), + + /* PTA FN */ + PINMUX_DATA(BS_MARK, PS0_15_FN1, PTA7_FN), + PINMUX_DATA(LGPIO7_MARK, PS0_15_FN3, PTA7_FN), + PINMUX_DATA(RDWR_MARK, PS0_14_FN1, PTA6_FN), + PINMUX_DATA(LGPIO6_MARK, PS0_14_FN3, PTA6_FN), + PINMUX_DATA(WE1_MARK, PS0_13_FN1, PTA5_FN), + PINMUX_DATA(LGPIO5_MARK, PS0_13_FN3, PTA5_FN), + PINMUX_DATA(RDY_MARK, PS0_12_FN1, PTA4_FN), + PINMUX_DATA(LGPIO4_MARK, 
PS0_12_FN3, PTA4_FN), + PINMUX_DATA(LGPIO3_MARK, PTA3_FN), + PINMUX_DATA(LGPIO2_MARK, PTA2_FN), + PINMUX_DATA(LGPIO1_MARK, PTA1_FN), + PINMUX_DATA(LGPIO0_MARK, PTA0_FN), + + /* PTB FN */ + PINMUX_DATA(D15_MARK, PS0_7_FN1, PTB7_FN), + PINMUX_DATA(ET0_MDC_MARK, PS0_7_FN2, PTB7_FN), + PINMUX_DATA(D14_MARK, PS0_6_FN1, PTB6_FN), + PINMUX_DATA(ET0_MDIO_MARK, PS0_6_FN2, PTB6_FN), + PINMUX_DATA(D13_MARK, PS0_5_FN1, PTB5_FN), + PINMUX_DATA(ET1_MDC_MARK, PS0_5_FN2, PTB5_FN), + PINMUX_DATA(D12_MARK, PS0_4_FN1, PTB4_FN), + PINMUX_DATA(ET1_MDIO_MARK, PS0_4_FN2, PTB4_FN), + PINMUX_DATA(D11_MARK, PS0_3_FN1, PTB3_FN), + PINMUX_DATA(SIM_D_MARK, PS0_3_FN2, PTB3_FN), + PINMUX_DATA(D10_MARK, PS0_2_FN1, PTB2_FN), + PINMUX_DATA(SIM_CLK_MARK, PS0_2_FN2, PTB2_FN), + PINMUX_DATA(D9_MARK, PS0_1_FN1, PTB1_FN), + PINMUX_DATA(SIM_RST_MARK, PS0_1_FN2, PTB1_FN), + PINMUX_DATA(D8_MARK, PTB0_FN), + + /* PTC FN */ + PINMUX_DATA(SD_WP_MARK, PTC7_FN), + PINMUX_DATA(SD_CD_MARK, PTC6_FN), + PINMUX_DATA(SD_CLK_MARK, PTC5_FN), + PINMUX_DATA(SD_CMD_MARK, PTC4_FN), + PINMUX_DATA(SD_D3_MARK, PTC3_FN), + PINMUX_DATA(SD_D2_MARK, PTC2_FN), + PINMUX_DATA(SD_D1_MARK, PTC1_FN), + PINMUX_DATA(SD_D0_MARK, PTC0_FN), + + /* PTD FN */ + PINMUX_DATA(IRQ7_MARK, PS1_7_FN1, PTD7_FN), + PINMUX_DATA(ADTRG1_MARK, PS1_7_FN3, PTD7_FN), + PINMUX_DATA(IRQ6_MARK, PS1_6_FN1, PTD6_FN), + PINMUX_DATA(ADTRG0_MARK, PS1_6_FN3, PTD6_FN), + PINMUX_DATA(IRQ5_MARK, PTD5_FN), + PINMUX_DATA(IRQ4_MARK, PTD4_FN), + PINMUX_DATA(IRQ3_MARK, PTD3_FN), + PINMUX_DATA(IRQ2_MARK, PTD2_FN), + PINMUX_DATA(IRQ1_MARK, PTD1_FN), + PINMUX_DATA(IRQ0_MARK, PTD0_FN), + + /* PTE FN */ + PINMUX_DATA(ET0_CRS_DV_MARK, PTE7_FN), + PINMUX_DATA(ET0_TXD1_MARK, PTE6_FN), + PINMUX_DATA(ET0_TXD0_MARK, PTE5_FN), + PINMUX_DATA(ET0_TX_EN_MARK, PTE4_FN), + PINMUX_DATA(ET0_REF_CLK_MARK, PTE3_FN), + PINMUX_DATA(ET0_RXD1_MARK, PTE2_FN), + PINMUX_DATA(ET0_RXD0_MARK, PTE1_FN), + PINMUX_DATA(ET0_RX_ER_MARK, PTE0_FN), + + /* PTF FN */ + PINMUX_DATA(ET1_CRS_DV_MARK, PTF7_FN), + PINMUX_DATA(ET1_TXD1_MARK, PTF6_FN), + PINMUX_DATA(ET1_TXD0_MARK, PTF5_FN), + PINMUX_DATA(ET1_TX_EN_MARK, PTF4_FN), + PINMUX_DATA(ET1_REF_CLK_MARK, PTF3_FN), + PINMUX_DATA(ET1_RXD1_MARK, PTF2_FN), + PINMUX_DATA(ET1_RXD0_MARK, PTF1_FN), + PINMUX_DATA(ET1_RX_ER_MARK, PTF0_FN), + + /* PTG FN */ + PINMUX_DATA(PWX0_MARK, PTG7_FN), + PINMUX_DATA(PWX1_MARK, PTG6_FN), + PINMUX_DATA(STATUS0_MARK, PS2_13_FN1, PTG5_FN), + PINMUX_DATA(PWX2_MARK, PS2_13_FN3, PTG5_FN), + PINMUX_DATA(STATUS1_MARK, PS2_12_FN1, PTG4_FN), + PINMUX_DATA(PWX3_MARK, PS2_12_FN3, PTG4_FN), + PINMUX_DATA(SERIRQ_MARK, PTG3_FN), + PINMUX_DATA(CLKRUN_MARK, PTG2_FN), + PINMUX_DATA(LPCPD_MARK, PTG1_FN), + PINMUX_DATA(LDRQ_MARK, PTG0_FN), + + /* PTH FN */ + PINMUX_DATA(SP1_MOSI_MARK, PTH7_FN), + PINMUX_DATA(SP1_MISO_MARK, PTH6_FN), + PINMUX_DATA(SP1_SCK_MARK, PTH5_FN), + PINMUX_DATA(SP1_SCK_FB_MARK, PTH4_FN), + PINMUX_DATA(SP1_SS0_MARK, PTH3_FN), + PINMUX_DATA(TCLK_MARK, PTH2_FN), + PINMUX_DATA(RXD4_MARK, PS2_1_FN1, PTH1_FN), + PINMUX_DATA(SP1_SS1_MARK, PS2_1_FN2, PTH1_FN), + PINMUX_DATA(TXD4_MARK, PS2_0_FN1, PTH0_FN), + PINMUX_DATA(SP0_SS1_MARK, PS2_0_FN2, PTH0_FN), + + /* PTI FN */ + PINMUX_DATA(IRQ15_MARK, PTI7_FN), + PINMUX_DATA(IRQ14_MARK, PTI6_FN), + PINMUX_DATA(IRQ13_MARK, PTI5_FN), + PINMUX_DATA(IRQ12_MARK, PTI4_FN), + PINMUX_DATA(IRQ11_MARK, PTI3_FN), + PINMUX_DATA(IRQ10_MARK, PTI2_FN), + PINMUX_DATA(IRQ9_MARK, PTI1_FN), + PINMUX_DATA(IRQ8_MARK, PTI0_FN), + + /* PTJ FN */ + PINMUX_DATA(RXD3_MARK, PTJ7_FN), + PINMUX_DATA(TXD3_MARK, PTJ6_FN), + PINMUX_DATA(RXD2_MARK, 
PTJ5_FN), + PINMUX_DATA(TXD2_MARK, PTJ4_FN), + PINMUX_DATA(COM1_TXD_MARK, PTJ3_FN), + PINMUX_DATA(COM1_RXD_MARK, PTJ2_FN), + PINMUX_DATA(COM1_RTS_MARK, PTJ1_FN), + PINMUX_DATA(COM1_CTS_MARK, PTJ0_FN), + + /* PTK FN */ + PINMUX_DATA(COM2_TXD_MARK, PTK7_FN), + PINMUX_DATA(COM2_RXD_MARK, PTK6_FN), + PINMUX_DATA(COM2_RTS_MARK, PTK5_FN), + PINMUX_DATA(COM2_CTS_MARK, PTK4_FN), + PINMUX_DATA(COM2_DTR_MARK, PTK3_FN), + PINMUX_DATA(COM2_DSR_MARK, PTK2_FN), + PINMUX_DATA(COM2_DCD_MARK, PTK1_FN), + PINMUX_DATA(COM2_RI_MARK, PTK0_FN), + + /* PTL FN */ + PINMUX_DATA(RAC_TXD_MARK, PTL7_FN), + PINMUX_DATA(RAC_RXD_MARK, PTL6_FN), + PINMUX_DATA(RAC_RTS_MARK, PTL5_FN), + PINMUX_DATA(RAC_CTS_MARK, PTL4_FN), + PINMUX_DATA(RAC_DTR_MARK, PTL3_FN), + PINMUX_DATA(RAC_DSR_MARK, PTL2_FN), + PINMUX_DATA(RAC_DCD_MARK, PTL1_FN), + PINMUX_DATA(RAC_RI_MARK, PTL0_FN), + + /* PTM FN */ + PINMUX_DATA(WP_MARK, PTM6_FN), + PINMUX_DATA(FMS0_MARK, PTM5_FN), + PINMUX_DATA(FMS1_MARK, PTM4_FN), + PINMUX_DATA(SDA6_MARK, PTM3_FN), + PINMUX_DATA(SCL6_MARK, PTM2_FN), + PINMUX_DATA(SDA7_MARK, PTM1_FN), + PINMUX_DATA(SCL7_MARK, PTM0_FN), + + /* PTN FN */ + PINMUX_DATA(SCK2_MARK, PS4_15_FN1, PTN7_FN), + PINMUX_DATA(EVENT7_MARK, PS4_15_FN2, PTN7_FN), + PINMUX_DATA(RTS4_MARK, PS4_14_FN1, PTN6_FN), + PINMUX_DATA(EVENT6_MARK, PS4_14_FN2, PTN6_FN), + PINMUX_DATA(RTS3_MARK, PS4_13_FN1, PTN5_FN), + PINMUX_DATA(EVENT5_MARK, PS4_13_FN2, PTN5_FN), + PINMUX_DATA(RTS2_MARK, PS4_12_FN1, PTN4_FN), + PINMUX_DATA(EVENT4_MARK, PS4_12_FN2, PTN4_FN), + PINMUX_DATA(CTS4_MARK, PS4_11_FN1, PTN3_FN), + PINMUX_DATA(EVENT3_MARK, PS4_11_FN2, PTN3_FN), + PINMUX_DATA(CTS3_MARK, PS4_10_FN1, PTN2_FN), + PINMUX_DATA(EVENT2_MARK, PS4_10_FN2, PTN2_FN), + PINMUX_DATA(CTS2_MARK, PS4_9_FN1, PTN1_FN), + PINMUX_DATA(EVENT1_MARK, PS4_9_FN2, PTN1_FN), + PINMUX_DATA(EVENT0_MARK, PTN0_FN), + + /* PTO FN */ + PINMUX_DATA(SGPIO0_CLK_MARK, PTO7_FN), + PINMUX_DATA(SGPIO0_LOAD_MARK, PTO6_FN), + PINMUX_DATA(SGPIO0_DI_MARK, PTO5_FN), + PINMUX_DATA(SGPIO0_DO_MARK, PTO4_FN), + PINMUX_DATA(SGPIO1_CLK_MARK, PTO3_FN), + PINMUX_DATA(SGPIO1_LOAD_MARK, PTO2_FN), + PINMUX_DATA(SGPIO1_DI_MARK, PTO1_FN), + PINMUX_DATA(SGPIO1_DO_MARK, PTO0_FN), + + /* PTP FN */ + PINMUX_DATA(JMCTCK_MARK, PTP6_FN), + PINMUX_DATA(JMCTMS_MARK, PTP5_FN), + PINMUX_DATA(JMCTDO_MARK, PTP4_FN), + PINMUX_DATA(JMCTDI_MARK, PTP3_FN), + PINMUX_DATA(JMCRST_MARK, PTP2_FN), + PINMUX_DATA(SCK4_MARK, PTP1_FN), + PINMUX_DATA(SCK3_MARK, PTP0_FN), + + /* PTQ FN */ + PINMUX_DATA(LAD3_MARK, PTQ6_FN), + PINMUX_DATA(LAD2_MARK, PTQ5_FN), + PINMUX_DATA(LAD1_MARK, PTQ4_FN), + PINMUX_DATA(LAD0_MARK, PTQ3_FN), + PINMUX_DATA(LFRAME_MARK, PTQ2_FN), + PINMUX_DATA(SCK4_MARK, PTQ1_FN), + PINMUX_DATA(SCK3_MARK, PTQ0_FN), + + /* PTR FN */ + PINMUX_DATA(SDA8_MARK, PTR7_FN), /* DDC3? */ + PINMUX_DATA(SCL8_MARK, PTR6_FN), /* DDC2? */ + PINMUX_DATA(SDA2_MARK, PTR5_FN), + PINMUX_DATA(SCL2_MARK, PTR4_FN), + PINMUX_DATA(SDA1_MARK, PTR3_FN), + PINMUX_DATA(SCL1_MARK, PTR2_FN), + PINMUX_DATA(SDA0_MARK, PTR1_FN), + PINMUX_DATA(SCL0_MARK, PTR0_FN), + + /* PTS FN */ + PINMUX_DATA(SDA9_MARK, PTS7_FN), /* DDC1? */ + PINMUX_DATA(SCL9_MARK, PTS6_FN), /* DDC0? 
*/ + PINMUX_DATA(SDA5_MARK, PTS5_FN), + PINMUX_DATA(SCL5_MARK, PTS4_FN), + PINMUX_DATA(SDA4_MARK, PTS3_FN), + PINMUX_DATA(SCL4_MARK, PTS2_FN), + PINMUX_DATA(SDA3_MARK, PTS1_FN), + PINMUX_DATA(SCL3_MARK, PTS0_FN), + + /* PTT FN */ + PINMUX_DATA(AUDSYNC_MARK, PTS5_FN), + PINMUX_DATA(AUDCK_MARK, PTS4_FN), + PINMUX_DATA(AUDATA3_MARK, PS4_3_FN1, PTS3_FN), + PINMUX_DATA(PWX7_MARK, PS4_3_FN2, PTS3_FN), + PINMUX_DATA(AUDATA2_MARK, PS4_2_FN1, PTS2_FN), + PINMUX_DATA(PWX6_MARK, PS4_2_FN2, PTS2_FN), + PINMUX_DATA(AUDATA1_MARK, PS4_1_FN1, PTS1_FN), + PINMUX_DATA(PWX5_MARK, PS4_1_FN2, PTS1_FN), + PINMUX_DATA(AUDATA0_MARK, PS4_0_FN1, PTS0_FN), + PINMUX_DATA(PWX4_MARK, PS4_0_FN2, PTS0_FN), + + /* PTU FN */ + PINMUX_DATA(CS6_MARK, PTU7_FN), + PINMUX_DATA(CS5_MARK, PTU6_FN), + PINMUX_DATA(CS4_MARK, PTU5_FN), + PINMUX_DATA(CS0_MARK, PTU4_FN), + PINMUX_DATA(RD_MARK, PTU3_FN), + PINMUX_DATA(WE0_MARK, PTU2_FN), + PINMUX_DATA(A25_MARK, PS5_9_FN1, PTU1_FN), + PINMUX_DATA(DREQ0_MARK, PS5_9_FN2, PTU1_FN), + PINMUX_DATA(A24_MARK, PS5_8_FN1, PTU0_FN), + PINMUX_DATA(DACK0_MARK, PS5_8_FN2, PTU0_FN), + + /* PTV FN */ + PINMUX_DATA(A23_MARK, PS5_7_FN1, PTV7_FN), + PINMUX_DATA(TEND0_MARK, PS5_7_FN2, PTV7_FN), + PINMUX_DATA(A22_MARK, PS5_6_FN1, PTV6_FN), + PINMUX_DATA(DREQ1_MARK, PS5_6_FN2, PTV6_FN), + PINMUX_DATA(A21_MARK, PS5_5_FN1, PTV5_FN), + PINMUX_DATA(DACK1_MARK, PS5_5_FN2, PTV5_FN), + PINMUX_DATA(A20_MARK, PS5_4_FN1, PTV4_FN), + PINMUX_DATA(TEND1_MARK, PS5_4_FN2, PTV4_FN), + PINMUX_DATA(A19_MARK, PTV3_FN), + PINMUX_DATA(A18_MARK, PTV2_FN), + PINMUX_DATA(A17_MARK, PTV1_FN), + PINMUX_DATA(A16_MARK, PTV0_FN), + + /* PTW FN */ + PINMUX_DATA(A15_MARK, PTW7_FN), + PINMUX_DATA(A14_MARK, PTW6_FN), + PINMUX_DATA(A13_MARK, PTW5_FN), + PINMUX_DATA(A12_MARK, PTW4_FN), + PINMUX_DATA(A11_MARK, PTW3_FN), + PINMUX_DATA(A10_MARK, PTW2_FN), + PINMUX_DATA(A9_MARK, PTW1_FN), + PINMUX_DATA(A8_MARK, PTW0_FN), + + /* PTX FN */ + PINMUX_DATA(A7_MARK, PTX7_FN), + PINMUX_DATA(A6_MARK, PTX6_FN), + PINMUX_DATA(A5_MARK, PTX5_FN), + PINMUX_DATA(A4_MARK, PTX4_FN), + PINMUX_DATA(A3_MARK, PTX3_FN), + PINMUX_DATA(A2_MARK, PTX2_FN), + PINMUX_DATA(A1_MARK, PTX1_FN), + PINMUX_DATA(A0_MARK, PTX0_FN), + + /* PTY FN */ + PINMUX_DATA(D7_MARK, PTY7_FN), + PINMUX_DATA(D6_MARK, PTY6_FN), + PINMUX_DATA(D5_MARK, PTY5_FN), + PINMUX_DATA(D4_MARK, PTY4_FN), + PINMUX_DATA(D3_MARK, PTY3_FN), + PINMUX_DATA(D2_MARK, PTY2_FN), + PINMUX_DATA(D1_MARK, PTY1_FN), + PINMUX_DATA(D0_MARK, PTY0_FN), +}; + +static struct pinmux_gpio pinmux_gpios[] = { + /* PTA */ + PINMUX_GPIO(GPIO_PTA7, PTA7_DATA), + PINMUX_GPIO(GPIO_PTA6, PTA6_DATA), + PINMUX_GPIO(GPIO_PTA5, PTA5_DATA), + PINMUX_GPIO(GPIO_PTA4, PTA4_DATA), + PINMUX_GPIO(GPIO_PTA3, PTA3_DATA), + PINMUX_GPIO(GPIO_PTA2, PTA2_DATA), + PINMUX_GPIO(GPIO_PTA1, PTA1_DATA), + PINMUX_GPIO(GPIO_PTA0, PTA0_DATA), + + /* PTB */ + PINMUX_GPIO(GPIO_PTB7, PTB7_DATA), + PINMUX_GPIO(GPIO_PTB6, PTB6_DATA), + PINMUX_GPIO(GPIO_PTB5, PTB5_DATA), + PINMUX_GPIO(GPIO_PTB4, PTB4_DATA), + PINMUX_GPIO(GPIO_PTB3, PTB3_DATA), + PINMUX_GPIO(GPIO_PTB2, PTB2_DATA), + PINMUX_GPIO(GPIO_PTB1, PTB1_DATA), + PINMUX_GPIO(GPIO_PTB0, PTB0_DATA), + + /* PTC */ + PINMUX_GPIO(GPIO_PTC7, PTC7_DATA), + PINMUX_GPIO(GPIO_PTC6, PTC6_DATA), + PINMUX_GPIO(GPIO_PTC5, PTC5_DATA), + PINMUX_GPIO(GPIO_PTC4, PTC4_DATA), + PINMUX_GPIO(GPIO_PTC3, PTC3_DATA), + PINMUX_GPIO(GPIO_PTC2, PTC2_DATA), + PINMUX_GPIO(GPIO_PTC1, PTC1_DATA), + PINMUX_GPIO(GPIO_PTC0, PTC0_DATA), + + /* PTD */ + PINMUX_GPIO(GPIO_PTD7, PTD7_DATA), + PINMUX_GPIO(GPIO_PTD6, PTD6_DATA), + PINMUX_GPIO(GPIO_PTD5, 
PTD5_DATA), + PINMUX_GPIO(GPIO_PTD4, PTD4_DATA), + PINMUX_GPIO(GPIO_PTD3, PTD3_DATA), + PINMUX_GPIO(GPIO_PTD2, PTD2_DATA), + PINMUX_GPIO(GPIO_PTD1, PTD1_DATA), + PINMUX_GPIO(GPIO_PTD0, PTD0_DATA), + + /* PTE */ + PINMUX_GPIO(GPIO_PTE7, PTE7_DATA), + PINMUX_GPIO(GPIO_PTE6, PTE6_DATA), + PINMUX_GPIO(GPIO_PTE5, PTE5_DATA), + PINMUX_GPIO(GPIO_PTE4, PTE4_DATA), + PINMUX_GPIO(GPIO_PTE3, PTE3_DATA), + PINMUX_GPIO(GPIO_PTE2, PTE2_DATA), + PINMUX_GPIO(GPIO_PTE1, PTE1_DATA), + PINMUX_GPIO(GPIO_PTE0, PTE0_DATA), + + /* PTF */ + PINMUX_GPIO(GPIO_PTF7, PTF7_DATA), + PINMUX_GPIO(GPIO_PTF6, PTF6_DATA), + PINMUX_GPIO(GPIO_PTF5, PTF5_DATA), + PINMUX_GPIO(GPIO_PTF4, PTF4_DATA), + PINMUX_GPIO(GPIO_PTF3, PTF3_DATA), + PINMUX_GPIO(GPIO_PTF2, PTF2_DATA), + PINMUX_GPIO(GPIO_PTF1, PTF1_DATA), + PINMUX_GPIO(GPIO_PTF0, PTF0_DATA), + + /* PTG */ + PINMUX_GPIO(GPIO_PTG7, PTG7_DATA), + PINMUX_GPIO(GPIO_PTG6, PTG6_DATA), + PINMUX_GPIO(GPIO_PTG5, PTG5_DATA), + PINMUX_GPIO(GPIO_PTG4, PTG4_DATA), + PINMUX_GPIO(GPIO_PTG3, PTG3_DATA), + PINMUX_GPIO(GPIO_PTG2, PTG2_DATA), + PINMUX_GPIO(GPIO_PTG1, PTG1_DATA), + PINMUX_GPIO(GPIO_PTG0, PTG0_DATA), + + /* PTH */ + PINMUX_GPIO(GPIO_PTH7, PTH7_DATA), + PINMUX_GPIO(GPIO_PTH6, PTH6_DATA), + PINMUX_GPIO(GPIO_PTH5, PTH5_DATA), + PINMUX_GPIO(GPIO_PTH4, PTH4_DATA), + PINMUX_GPIO(GPIO_PTH3, PTH3_DATA), + PINMUX_GPIO(GPIO_PTH2, PTH2_DATA), + PINMUX_GPIO(GPIO_PTH1, PTH1_DATA), + PINMUX_GPIO(GPIO_PTH0, PTH0_DATA), + + /* PTI */ + PINMUX_GPIO(GPIO_PTI7, PTI7_DATA), + PINMUX_GPIO(GPIO_PTI6, PTI6_DATA), + PINMUX_GPIO(GPIO_PTI5, PTI5_DATA), + PINMUX_GPIO(GPIO_PTI4, PTI4_DATA), + PINMUX_GPIO(GPIO_PTI3, PTI3_DATA), + PINMUX_GPIO(GPIO_PTI2, PTI2_DATA), + PINMUX_GPIO(GPIO_PTI1, PTI1_DATA), + PINMUX_GPIO(GPIO_PTI0, PTI0_DATA), + + /* PTJ */ + PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA), + PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA), + PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA), + PINMUX_GPIO(GPIO_PTJ4, PTJ4_DATA), + PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA), + PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA), + PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA), + PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA), + + /* PTK */ + PINMUX_GPIO(GPIO_PTK7, PTK7_DATA), + PINMUX_GPIO(GPIO_PTK6, PTK6_DATA), + PINMUX_GPIO(GPIO_PTK5, PTK5_DATA), + PINMUX_GPIO(GPIO_PTK4, PTK4_DATA), + PINMUX_GPIO(GPIO_PTK3, PTK3_DATA), + PINMUX_GPIO(GPIO_PTK2, PTK2_DATA), + PINMUX_GPIO(GPIO_PTK1, PTK1_DATA), + PINMUX_GPIO(GPIO_PTK0, PTK0_DATA), + + /* PTL */ + PINMUX_GPIO(GPIO_PTL7, PTL7_DATA), + PINMUX_GPIO(GPIO_PTL6, PTL6_DATA), + PINMUX_GPIO(GPIO_PTL5, PTL5_DATA), + PINMUX_GPIO(GPIO_PTL4, PTL4_DATA), + PINMUX_GPIO(GPIO_PTL3, PTL3_DATA), + PINMUX_GPIO(GPIO_PTL2, PTL2_DATA), + PINMUX_GPIO(GPIO_PTL1, PTL1_DATA), + PINMUX_GPIO(GPIO_PTL0, PTL0_DATA), + + /* PTM */ + PINMUX_GPIO(GPIO_PTM6, PTM6_DATA), + PINMUX_GPIO(GPIO_PTM5, PTM5_DATA), + PINMUX_GPIO(GPIO_PTM4, PTM4_DATA), + PINMUX_GPIO(GPIO_PTM3, PTM3_DATA), + PINMUX_GPIO(GPIO_PTM2, PTM2_DATA), + PINMUX_GPIO(GPIO_PTM1, PTM1_DATA), + PINMUX_GPIO(GPIO_PTM0, PTM0_DATA), + + /* PTN */ + PINMUX_GPIO(GPIO_PTN7, PTN7_DATA), + PINMUX_GPIO(GPIO_PTN6, PTN6_DATA), + PINMUX_GPIO(GPIO_PTN5, PTN5_DATA), + PINMUX_GPIO(GPIO_PTN4, PTN4_DATA), + PINMUX_GPIO(GPIO_PTN3, PTN3_DATA), + PINMUX_GPIO(GPIO_PTN2, PTN2_DATA), + PINMUX_GPIO(GPIO_PTN1, PTN1_DATA), + PINMUX_GPIO(GPIO_PTN0, PTN0_DATA), + + /* PTO */ + PINMUX_GPIO(GPIO_PTO7, PTO7_DATA), + PINMUX_GPIO(GPIO_PTO6, PTO6_DATA), + PINMUX_GPIO(GPIO_PTO5, PTO5_DATA), + PINMUX_GPIO(GPIO_PTO4, PTO4_DATA), + PINMUX_GPIO(GPIO_PTO3, PTO3_DATA), + PINMUX_GPIO(GPIO_PTO2, PTO2_DATA), + PINMUX_GPIO(GPIO_PTO1, PTO1_DATA), + 
PINMUX_GPIO(GPIO_PTO0, PTO0_DATA), + + /* PTP */ + PINMUX_GPIO(GPIO_PTP6, PTP6_DATA), + PINMUX_GPIO(GPIO_PTP5, PTP5_DATA), + PINMUX_GPIO(GPIO_PTP4, PTP4_DATA), + PINMUX_GPIO(GPIO_PTP3, PTP3_DATA), + PINMUX_GPIO(GPIO_PTP2, PTP2_DATA), + PINMUX_GPIO(GPIO_PTP1, PTP1_DATA), + PINMUX_GPIO(GPIO_PTP0, PTP0_DATA), + + /* PTQ */ + PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA), + PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA), + PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA), + PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA), + PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA), + PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA), + PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA), + + /* PTR */ + PINMUX_GPIO(GPIO_PTR7, PTR7_DATA), + PINMUX_GPIO(GPIO_PTR6, PTR6_DATA), + PINMUX_GPIO(GPIO_PTR5, PTR5_DATA), + PINMUX_GPIO(GPIO_PTR4, PTR4_DATA), + PINMUX_GPIO(GPIO_PTR3, PTR3_DATA), + PINMUX_GPIO(GPIO_PTR2, PTR2_DATA), + PINMUX_GPIO(GPIO_PTR1, PTR1_DATA), + PINMUX_GPIO(GPIO_PTR0, PTR0_DATA), + + /* PTS */ + PINMUX_GPIO(GPIO_PTS7, PTS7_DATA), + PINMUX_GPIO(GPIO_PTS6, PTS6_DATA), + PINMUX_GPIO(GPIO_PTS5, PTS5_DATA), + PINMUX_GPIO(GPIO_PTS4, PTS4_DATA), + PINMUX_GPIO(GPIO_PTS3, PTS3_DATA), + PINMUX_GPIO(GPIO_PTS2, PTS2_DATA), + PINMUX_GPIO(GPIO_PTS1, PTS1_DATA), + PINMUX_GPIO(GPIO_PTS0, PTS0_DATA), + + /* PTT */ + PINMUX_GPIO(GPIO_PTT5, PTT5_DATA), + PINMUX_GPIO(GPIO_PTT4, PTT4_DATA), + PINMUX_GPIO(GPIO_PTT3, PTT3_DATA), + PINMUX_GPIO(GPIO_PTT2, PTT2_DATA), + PINMUX_GPIO(GPIO_PTT1, PTT1_DATA), + PINMUX_GPIO(GPIO_PTT0, PTT0_DATA), + + /* PTU */ + PINMUX_GPIO(GPIO_PTU7, PTU7_DATA), + PINMUX_GPIO(GPIO_PTU6, PTU6_DATA), + PINMUX_GPIO(GPIO_PTU5, PTU5_DATA), + PINMUX_GPIO(GPIO_PTU4, PTU4_DATA), + PINMUX_GPIO(GPIO_PTU3, PTU3_DATA), + PINMUX_GPIO(GPIO_PTU2, PTU2_DATA), + PINMUX_GPIO(GPIO_PTU1, PTU1_DATA), + PINMUX_GPIO(GPIO_PTU0, PTU0_DATA), + + /* PTV */ + PINMUX_GPIO(GPIO_PTV7, PTV7_DATA), + PINMUX_GPIO(GPIO_PTV6, PTV6_DATA), + PINMUX_GPIO(GPIO_PTV5, PTV5_DATA), + PINMUX_GPIO(GPIO_PTV4, PTV4_DATA), + PINMUX_GPIO(GPIO_PTV3, PTV3_DATA), + PINMUX_GPIO(GPIO_PTV2, PTV2_DATA), + PINMUX_GPIO(GPIO_PTV1, PTV1_DATA), + PINMUX_GPIO(GPIO_PTV0, PTV0_DATA), + + /* PTW */ + PINMUX_GPIO(GPIO_PTW7, PTW7_DATA), + PINMUX_GPIO(GPIO_PTW6, PTW6_DATA), + PINMUX_GPIO(GPIO_PTW5, PTW5_DATA), + PINMUX_GPIO(GPIO_PTW4, PTW4_DATA), + PINMUX_GPIO(GPIO_PTW3, PTW3_DATA), + PINMUX_GPIO(GPIO_PTW2, PTW2_DATA), + PINMUX_GPIO(GPIO_PTW1, PTW1_DATA), + PINMUX_GPIO(GPIO_PTW0, PTW0_DATA), + + /* PTX */ + PINMUX_GPIO(GPIO_PTX7, PTX7_DATA), + PINMUX_GPIO(GPIO_PTX6, PTX6_DATA), + PINMUX_GPIO(GPIO_PTX5, PTX5_DATA), + PINMUX_GPIO(GPIO_PTX4, PTX4_DATA), + PINMUX_GPIO(GPIO_PTX3, PTX3_DATA), + PINMUX_GPIO(GPIO_PTX2, PTX2_DATA), + PINMUX_GPIO(GPIO_PTX1, PTX1_DATA), + PINMUX_GPIO(GPIO_PTX0, PTX0_DATA), + + /* PTY */ + PINMUX_GPIO(GPIO_PTY7, PTY7_DATA), + PINMUX_GPIO(GPIO_PTY6, PTY6_DATA), + PINMUX_GPIO(GPIO_PTY5, PTY5_DATA), + PINMUX_GPIO(GPIO_PTY4, PTY4_DATA), + PINMUX_GPIO(GPIO_PTY3, PTY3_DATA), + PINMUX_GPIO(GPIO_PTY2, PTY2_DATA), + PINMUX_GPIO(GPIO_PTY1, PTY1_DATA), + PINMUX_GPIO(GPIO_PTY0, PTY0_DATA), + + /* PTZ */ + PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA), + PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA), + PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA), + PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA), + PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA), + PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA), + PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA), + PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA), + + /* PTA (mobule: LBSC, CPG, LPC) */ + PINMUX_GPIO(GPIO_FN_BS, BS_MARK), + PINMUX_GPIO(GPIO_FN_RDWR, RDWR_MARK), + PINMUX_GPIO(GPIO_FN_WE1, WE1_MARK), + PINMUX_GPIO(GPIO_FN_RDY, RDY_MARK), + PINMUX_GPIO(GPIO_FN_MD10, MD10_MARK), + PINMUX_GPIO(GPIO_FN_MD9, 
MD9_MARK), + PINMUX_GPIO(GPIO_FN_MD8, MD8_MARK), + PINMUX_GPIO(GPIO_FN_LGPIO7, LGPIO7_MARK), + PINMUX_GPIO(GPIO_FN_LGPIO6, LGPIO6_MARK), + PINMUX_GPIO(GPIO_FN_LGPIO5, LGPIO5_MARK), + PINMUX_GPIO(GPIO_FN_LGPIO4, LGPIO4_MARK), + PINMUX_GPIO(GPIO_FN_LGPIO3, LGPIO3_MARK), + PINMUX_GPIO(GPIO_FN_LGPIO2, LGPIO2_MARK), + PINMUX_GPIO(GPIO_FN_LGPIO1, LGPIO1_MARK), + PINMUX_GPIO(GPIO_FN_LGPIO0, LGPIO0_MARK), + + /* PTB (mobule: LBSC, EtherC, SIM, LPC) */ + PINMUX_GPIO(GPIO_FN_D15, D15_MARK), + PINMUX_GPIO(GPIO_FN_D14, D14_MARK), + PINMUX_GPIO(GPIO_FN_D13, D13_MARK), + PINMUX_GPIO(GPIO_FN_D12, D12_MARK), + PINMUX_GPIO(GPIO_FN_D11, D11_MARK), + PINMUX_GPIO(GPIO_FN_D10, D10_MARK), + PINMUX_GPIO(GPIO_FN_D9, D9_MARK), + PINMUX_GPIO(GPIO_FN_D8, D8_MARK), + PINMUX_GPIO(GPIO_FN_ET0_MDC, ET0_MDC_MARK), + PINMUX_GPIO(GPIO_FN_ET0_MDIO, ET0_MDIO_MARK), + PINMUX_GPIO(GPIO_FN_ET1_MDC, ET1_MDC_MARK), + PINMUX_GPIO(GPIO_FN_ET1_MDIO, ET1_MDIO_MARK), + PINMUX_GPIO(GPIO_FN_WPSZ1, WPSZ1_MARK), + PINMUX_GPIO(GPIO_FN_WPSZ0, WPSZ0_MARK), + PINMUX_GPIO(GPIO_FN_FWID, FWID_MARK), + PINMUX_GPIO(GPIO_FN_FLSHSZ, FLSHSZ_MARK), + PINMUX_GPIO(GPIO_FN_LPC_SPIEN, LPC_SPIEN_MARK), + PINMUX_GPIO(GPIO_FN_BASEL, BASEL_MARK), + + /* PTC (mobule: SD) */ + PINMUX_GPIO(GPIO_FN_SD_WP, SD_WP_MARK), + PINMUX_GPIO(GPIO_FN_SD_CD, SD_CD_MARK), + PINMUX_GPIO(GPIO_FN_SD_CLK, SD_CLK_MARK), + PINMUX_GPIO(GPIO_FN_SD_CMD, SD_CMD_MARK), + PINMUX_GPIO(GPIO_FN_SD_D3, SD_D3_MARK), + PINMUX_GPIO(GPIO_FN_SD_D2, SD_D2_MARK), + PINMUX_GPIO(GPIO_FN_SD_D1, SD_D1_MARK), + PINMUX_GPIO(GPIO_FN_SD_D0, SD_D0_MARK), + + /* PTD (mobule: INTC, SPI0, LBSC, CPG, ADC) */ + PINMUX_GPIO(GPIO_FN_IRQ7, IRQ7_MARK), + PINMUX_GPIO(GPIO_FN_IRQ6, IRQ6_MARK), + PINMUX_GPIO(GPIO_FN_IRQ5, IRQ5_MARK), + PINMUX_GPIO(GPIO_FN_IRQ4, IRQ4_MARK), + PINMUX_GPIO(GPIO_FN_IRQ3, IRQ3_MARK), + PINMUX_GPIO(GPIO_FN_IRQ2, IRQ2_MARK), + PINMUX_GPIO(GPIO_FN_IRQ1, IRQ1_MARK), + PINMUX_GPIO(GPIO_FN_IRQ0, IRQ0_MARK), + PINMUX_GPIO(GPIO_FN_MD6, MD6_MARK), + PINMUX_GPIO(GPIO_FN_MD5, MD5_MARK), + PINMUX_GPIO(GPIO_FN_MD3, MD3_MARK), + PINMUX_GPIO(GPIO_FN_MD2, MD2_MARK), + PINMUX_GPIO(GPIO_FN_MD1, MD1_MARK), + PINMUX_GPIO(GPIO_FN_MD0, MD0_MARK), + PINMUX_GPIO(GPIO_FN_ADTRG1, ADTRG1_MARK), + PINMUX_GPIO(GPIO_FN_ADTRG0, ADTRG0_MARK), + + /* PTE (mobule: EtherC) */ + PINMUX_GPIO(GPIO_FN_ET0_CRS_DV, ET0_CRS_DV_MARK), + PINMUX_GPIO(GPIO_FN_ET0_TXD1, ET0_TXD1_MARK), + PINMUX_GPIO(GPIO_FN_ET0_TXD0, ET0_TXD0_MARK), + PINMUX_GPIO(GPIO_FN_ET0_TX_EN, ET0_TX_EN_MARK), + PINMUX_GPIO(GPIO_FN_ET0_REF_CLK, ET0_REF_CLK_MARK), + PINMUX_GPIO(GPIO_FN_ET0_RXD1, ET0_RXD1_MARK), + PINMUX_GPIO(GPIO_FN_ET0_RXD0, ET0_RXD0_MARK), + PINMUX_GPIO(GPIO_FN_ET0_RX_ER, ET0_RX_ER_MARK), + + /* PTF (mobule: EtherC) */ + PINMUX_GPIO(GPIO_FN_ET1_CRS_DV, ET1_CRS_DV_MARK), + PINMUX_GPIO(GPIO_FN_ET1_TXD1, ET1_TXD1_MARK), + PINMUX_GPIO(GPIO_FN_ET1_TXD0, ET1_TXD0_MARK), + PINMUX_GPIO(GPIO_FN_ET1_TX_EN, ET1_TX_EN_MARK), + PINMUX_GPIO(GPIO_FN_ET1_REF_CLK, ET1_REF_CLK_MARK), + PINMUX_GPIO(GPIO_FN_ET1_RXD1, ET1_RXD1_MARK), + PINMUX_GPIO(GPIO_FN_ET1_RXD0, ET1_RXD0_MARK), + PINMUX_GPIO(GPIO_FN_ET1_RX_ER, ET1_RX_ER_MARK), + + /* PTG (mobule: SYSTEM, PWMX, LPC) */ + PINMUX_GPIO(GPIO_FN_STATUS0, STATUS0_MARK), + PINMUX_GPIO(GPIO_FN_STATUS1, STATUS1_MARK), + PINMUX_GPIO(GPIO_FN_PWX0, PWX0_MARK), + PINMUX_GPIO(GPIO_FN_PWX1, PWX1_MARK), + PINMUX_GPIO(GPIO_FN_PWX2, PWX2_MARK), + PINMUX_GPIO(GPIO_FN_PWX3, PWX3_MARK), + PINMUX_GPIO(GPIO_FN_SERIRQ, SERIRQ_MARK), + PINMUX_GPIO(GPIO_FN_CLKRUN, CLKRUN_MARK), + PINMUX_GPIO(GPIO_FN_LPCPD, LPCPD_MARK), + 
PINMUX_GPIO(GPIO_FN_LDRQ, LDRQ_MARK), + + /* PTH (mobule: TMU, SCIF234, SPI1, SPI0) */ + PINMUX_GPIO(GPIO_FN_TCLK, TCLK_MARK), + PINMUX_GPIO(GPIO_FN_RXD4, RXD4_MARK), + PINMUX_GPIO(GPIO_FN_TXD4, TXD4_MARK), + PINMUX_GPIO(GPIO_FN_SP1_MOSI, SP1_MOSI_MARK), + PINMUX_GPIO(GPIO_FN_SP1_MISO, SP1_MISO_MARK), + PINMUX_GPIO(GPIO_FN_SP1_SCK, SP1_SCK_MARK), + PINMUX_GPIO(GPIO_FN_SP1_SCK_FB, SP1_SCK_FB_MARK), + PINMUX_GPIO(GPIO_FN_SP1_SS0, SP1_SS0_MARK), + PINMUX_GPIO(GPIO_FN_SP1_SS1, SP1_SS1_MARK), + PINMUX_GPIO(GPIO_FN_SP0_SS1, SP0_SS1_MARK), + + /* PTI (mobule: INTC) */ + PINMUX_GPIO(GPIO_FN_IRQ15, IRQ15_MARK), + PINMUX_GPIO(GPIO_FN_IRQ14, IRQ14_MARK), + PINMUX_GPIO(GPIO_FN_IRQ13, IRQ13_MARK), + PINMUX_GPIO(GPIO_FN_IRQ12, IRQ12_MARK), + PINMUX_GPIO(GPIO_FN_IRQ11, IRQ11_MARK), + PINMUX_GPIO(GPIO_FN_IRQ10, IRQ10_MARK), + PINMUX_GPIO(GPIO_FN_IRQ9, IRQ9_MARK), + PINMUX_GPIO(GPIO_FN_IRQ8, IRQ8_MARK), + + /* PTJ (mobule: SCIF234, SERMUX) */ + PINMUX_GPIO(GPIO_FN_RXD3, RXD3_MARK), + PINMUX_GPIO(GPIO_FN_TXD3, TXD3_MARK), + PINMUX_GPIO(GPIO_FN_RXD2, RXD2_MARK), + PINMUX_GPIO(GPIO_FN_TXD2, TXD2_MARK), + PINMUX_GPIO(GPIO_FN_COM1_TXD, COM1_TXD_MARK), + PINMUX_GPIO(GPIO_FN_COM1_RXD, COM1_RXD_MARK), + PINMUX_GPIO(GPIO_FN_COM1_RTS, COM1_RTS_MARK), + PINMUX_GPIO(GPIO_FN_COM1_CTS, COM1_CTS_MARK), + + /* PTK (mobule: SERMUX) */ + PINMUX_GPIO(GPIO_FN_COM2_TXD, COM2_TXD_MARK), + PINMUX_GPIO(GPIO_FN_COM2_RXD, COM2_RXD_MARK), + PINMUX_GPIO(GPIO_FN_COM2_RTS, COM2_RTS_MARK), + PINMUX_GPIO(GPIO_FN_COM2_CTS, COM2_CTS_MARK), + PINMUX_GPIO(GPIO_FN_COM2_DTR, COM2_DTR_MARK), + PINMUX_GPIO(GPIO_FN_COM2_DSR, COM2_DSR_MARK), + PINMUX_GPIO(GPIO_FN_COM2_DCD, COM2_DCD_MARK), + PINMUX_GPIO(GPIO_FN_COM2_RI, COM2_RI_MARK), + + /* PTL (mobule: SERMUX) */ + PINMUX_GPIO(GPIO_FN_RAC_TXD, RAC_TXD_MARK), + PINMUX_GPIO(GPIO_FN_RAC_RXD, RAC_RXD_MARK), + PINMUX_GPIO(GPIO_FN_RAC_RTS, RAC_RTS_MARK), + PINMUX_GPIO(GPIO_FN_RAC_CTS, RAC_CTS_MARK), + PINMUX_GPIO(GPIO_FN_RAC_DTR, RAC_DTR_MARK), + PINMUX_GPIO(GPIO_FN_RAC_DSR, RAC_DSR_MARK), + PINMUX_GPIO(GPIO_FN_RAC_DCD, RAC_DCD_MARK), + PINMUX_GPIO(GPIO_FN_RAC_RI, RAC_RI_MARK), + + /* PTM (mobule: IIC, LPC) */ + PINMUX_GPIO(GPIO_FN_SDA6, SDA6_MARK), + PINMUX_GPIO(GPIO_FN_SCL6, SCL6_MARK), + PINMUX_GPIO(GPIO_FN_SDA7, SDA7_MARK), + PINMUX_GPIO(GPIO_FN_SCL7, SCL7_MARK), + PINMUX_GPIO(GPIO_FN_WP, WP_MARK), + PINMUX_GPIO(GPIO_FN_FMS0, FMS0_MARK), + PINMUX_GPIO(GPIO_FN_FMS1, FMS1_MARK), + + /* PTN (mobule: SCIF234, EVC) */ + PINMUX_GPIO(GPIO_FN_SCK2, SCK2_MARK), + PINMUX_GPIO(GPIO_FN_RTS4, RTS4_MARK), + PINMUX_GPIO(GPIO_FN_RTS3, RTS3_MARK), + PINMUX_GPIO(GPIO_FN_RTS2, RTS2_MARK), + PINMUX_GPIO(GPIO_FN_CTS4, CTS4_MARK), + PINMUX_GPIO(GPIO_FN_CTS3, CTS3_MARK), + PINMUX_GPIO(GPIO_FN_CTS2, CTS2_MARK), + PINMUX_GPIO(GPIO_FN_EVENT7, EVENT7_MARK), + PINMUX_GPIO(GPIO_FN_EVENT6, EVENT6_MARK), + PINMUX_GPIO(GPIO_FN_EVENT5, EVENT5_MARK), + PINMUX_GPIO(GPIO_FN_EVENT4, EVENT4_MARK), + PINMUX_GPIO(GPIO_FN_EVENT3, EVENT3_MARK), + PINMUX_GPIO(GPIO_FN_EVENT2, EVENT2_MARK), + PINMUX_GPIO(GPIO_FN_EVENT1, EVENT1_MARK), + PINMUX_GPIO(GPIO_FN_EVENT0, EVENT0_MARK), + + /* PTO (mobule: SGPIO) */ + PINMUX_GPIO(GPIO_FN_SGPIO0_CLK, SGPIO0_CLK_MARK), + PINMUX_GPIO(GPIO_FN_SGPIO0_LOAD, SGPIO0_LOAD_MARK), + PINMUX_GPIO(GPIO_FN_SGPIO0_DI, SGPIO0_DI_MARK), + PINMUX_GPIO(GPIO_FN_SGPIO0_DO, SGPIO0_DO_MARK), + PINMUX_GPIO(GPIO_FN_SGPIO1_CLK, SGPIO1_CLK_MARK), + PINMUX_GPIO(GPIO_FN_SGPIO1_LOAD, SGPIO1_LOAD_MARK), + PINMUX_GPIO(GPIO_FN_SGPIO1_DI, SGPIO1_DI_MARK), + PINMUX_GPIO(GPIO_FN_SGPIO1_DO, SGPIO1_DO_MARK), + + /* PTP (mobule: JMC, 
SCIF234) */ + PINMUX_GPIO(GPIO_FN_JMCTCK, JMCTCK_MARK), + PINMUX_GPIO(GPIO_FN_JMCTMS, JMCTMS_MARK), + PINMUX_GPIO(GPIO_FN_JMCTDO, JMCTDO_MARK), + PINMUX_GPIO(GPIO_FN_JMCTDI, JMCTDI_MARK), + PINMUX_GPIO(GPIO_FN_JMCRST, JMCRST_MARK), + PINMUX_GPIO(GPIO_FN_SCK4, SCK4_MARK), + PINMUX_GPIO(GPIO_FN_SCK3, SCK3_MARK), + + /* PTQ (mobule: LPC) */ + PINMUX_GPIO(GPIO_FN_LAD3, LAD3_MARK), + PINMUX_GPIO(GPIO_FN_LAD2, LAD2_MARK), + PINMUX_GPIO(GPIO_FN_LAD1, LAD1_MARK), + PINMUX_GPIO(GPIO_FN_LAD0, LAD0_MARK), + PINMUX_GPIO(GPIO_FN_LFRAME, LFRAME_MARK), + PINMUX_GPIO(GPIO_FN_LRESET, LRESET_MARK), + PINMUX_GPIO(GPIO_FN_LCLK, LCLK_MARK), + + /* PTR (mobule: GRA, IIC) */ + PINMUX_GPIO(GPIO_FN_DDC3, DDC3_MARK), + PINMUX_GPIO(GPIO_FN_DDC2, DDC2_MARK), + PINMUX_GPIO(GPIO_FN_SDA8, SDA8_MARK), + PINMUX_GPIO(GPIO_FN_SCL8, SCL8_MARK), + PINMUX_GPIO(GPIO_FN_SDA2, SDA2_MARK), + PINMUX_GPIO(GPIO_FN_SCL2, SCL2_MARK), + PINMUX_GPIO(GPIO_FN_SDA1, SDA1_MARK), + PINMUX_GPIO(GPIO_FN_SCL1, SCL1_MARK), + PINMUX_GPIO(GPIO_FN_SDA0, SDA0_MARK), + PINMUX_GPIO(GPIO_FN_SCL0, SCL0_MARK), + + /* PTS (mobule: GRA, IIC) */ + PINMUX_GPIO(GPIO_FN_DDC1, DDC1_MARK), + PINMUX_GPIO(GPIO_FN_DDC0, DDC0_MARK), + PINMUX_GPIO(GPIO_FN_SDA9, SDA9_MARK), + PINMUX_GPIO(GPIO_FN_SCL9, SCL9_MARK), + PINMUX_GPIO(GPIO_FN_SDA5, SDA5_MARK), + PINMUX_GPIO(GPIO_FN_SCL5, SCL5_MARK), + PINMUX_GPIO(GPIO_FN_SDA4, SDA4_MARK), + PINMUX_GPIO(GPIO_FN_SCL4, SCL4_MARK), + PINMUX_GPIO(GPIO_FN_SDA3, SDA3_MARK), + PINMUX_GPIO(GPIO_FN_SCL3, SCL3_MARK), + + /* PTT (mobule: SYSTEM, PWMX) */ + PINMUX_GPIO(GPIO_FN_AUDSYNC, AUDSYNC_MARK), + PINMUX_GPIO(GPIO_FN_AUDCK, AUDCK_MARK), + PINMUX_GPIO(GPIO_FN_AUDATA3, AUDATA3_MARK), + PINMUX_GPIO(GPIO_FN_AUDATA2, AUDATA2_MARK), + PINMUX_GPIO(GPIO_FN_AUDATA1, AUDATA1_MARK), + PINMUX_GPIO(GPIO_FN_AUDATA0, AUDATA0_MARK), + PINMUX_GPIO(GPIO_FN_PWX7, PWX7_MARK), + PINMUX_GPIO(GPIO_FN_PWX6, PWX6_MARK), + PINMUX_GPIO(GPIO_FN_PWX5, PWX5_MARK), + PINMUX_GPIO(GPIO_FN_PWX4, PWX4_MARK), + + /* PTU (mobule: LBSC, DMAC) */ + PINMUX_GPIO(GPIO_FN_CS6, CS6_MARK), + PINMUX_GPIO(GPIO_FN_CS5, CS5_MARK), + PINMUX_GPIO(GPIO_FN_CS4, CS4_MARK), + PINMUX_GPIO(GPIO_FN_CS0, CS0_MARK), + PINMUX_GPIO(GPIO_FN_RD, RD_MARK), + PINMUX_GPIO(GPIO_FN_WE0, WE0_MARK), + PINMUX_GPIO(GPIO_FN_A25, A25_MARK), + PINMUX_GPIO(GPIO_FN_A24, A24_MARK), + PINMUX_GPIO(GPIO_FN_DREQ0, DREQ0_MARK), + PINMUX_GPIO(GPIO_FN_DACK0, DACK0_MARK), + + /* PTV (mobule: LBSC, DMAC) */ + PINMUX_GPIO(GPIO_FN_A23, A23_MARK), + PINMUX_GPIO(GPIO_FN_A22, A22_MARK), + PINMUX_GPIO(GPIO_FN_A21, A21_MARK), + PINMUX_GPIO(GPIO_FN_A20, A20_MARK), + PINMUX_GPIO(GPIO_FN_A19, A19_MARK), + PINMUX_GPIO(GPIO_FN_A18, A18_MARK), + PINMUX_GPIO(GPIO_FN_A17, A17_MARK), + PINMUX_GPIO(GPIO_FN_A16, A16_MARK), + PINMUX_GPIO(GPIO_FN_TEND0, TEND0_MARK), + PINMUX_GPIO(GPIO_FN_DREQ1, DREQ1_MARK), + PINMUX_GPIO(GPIO_FN_DACK1, DACK1_MARK), + PINMUX_GPIO(GPIO_FN_TEND1, TEND1_MARK), + + /* PTW (mobule: LBSC) */ + PINMUX_GPIO(GPIO_FN_A16, A16_MARK), + PINMUX_GPIO(GPIO_FN_A15, A15_MARK), + PINMUX_GPIO(GPIO_FN_A14, A14_MARK), + PINMUX_GPIO(GPIO_FN_A13, A13_MARK), + PINMUX_GPIO(GPIO_FN_A12, A12_MARK), + PINMUX_GPIO(GPIO_FN_A11, A11_MARK), + PINMUX_GPIO(GPIO_FN_A10, A10_MARK), + PINMUX_GPIO(GPIO_FN_A9, A9_MARK), + PINMUX_GPIO(GPIO_FN_A8, A8_MARK), + + /* PTX (mobule: LBSC) */ + PINMUX_GPIO(GPIO_FN_A7, A7_MARK), + PINMUX_GPIO(GPIO_FN_A6, A6_MARK), + PINMUX_GPIO(GPIO_FN_A5, A5_MARK), + PINMUX_GPIO(GPIO_FN_A4, A4_MARK), + PINMUX_GPIO(GPIO_FN_A3, A3_MARK), + PINMUX_GPIO(GPIO_FN_A2, A2_MARK), + PINMUX_GPIO(GPIO_FN_A1, A1_MARK), + 
PINMUX_GPIO(GPIO_FN_A0, A0_MARK), + + /* PTY (mobule: LBSC) */ + PINMUX_GPIO(GPIO_FN_D7, D7_MARK), + PINMUX_GPIO(GPIO_FN_D6, D6_MARK), + PINMUX_GPIO(GPIO_FN_D5, D5_MARK), + PINMUX_GPIO(GPIO_FN_D4, D4_MARK), + PINMUX_GPIO(GPIO_FN_D3, D3_MARK), + PINMUX_GPIO(GPIO_FN_D2, D2_MARK), + PINMUX_GPIO(GPIO_FN_D1, D1_MARK), + PINMUX_GPIO(GPIO_FN_D0, D0_MARK), + }; + +static struct pinmux_cfg_reg pinmux_config_regs[] = { + { PINMUX_CFG_REG("PACR", 0xffec0000, 16, 2) { + PTA7_FN, PTA7_OUT, PTA7_IN, 0, + PTA6_FN, PTA6_OUT, PTA6_IN, 0, + PTA5_FN, PTA5_OUT, PTA5_IN, 0, + PTA4_FN, PTA4_OUT, PTA4_IN, 0, + PTA3_FN, PTA3_OUT, PTA3_IN, 0, + PTA2_FN, PTA2_OUT, PTA2_IN, 0, + PTA1_FN, PTA1_OUT, PTA1_IN, 0, + PTA0_FN, PTA0_OUT, PTA0_IN, 0 } + }, + { PINMUX_CFG_REG("PBCR", 0xffec0002, 16, 2) { + PTB7_FN, PTB7_OUT, PTB7_IN, 0, + PTB6_FN, PTB6_OUT, PTB6_IN, 0, + PTB5_FN, PTB5_OUT, PTB5_IN, 0, + PTB4_FN, PTB4_OUT, PTB4_IN, 0, + PTB3_FN, PTB3_OUT, PTB3_IN, 0, + PTB2_FN, PTB2_OUT, PTB2_IN, 0, + PTB1_FN, PTB1_OUT, PTB1_IN, 0, + PTB0_FN, PTB0_OUT, PTB0_IN, 0 } + }, + { PINMUX_CFG_REG("PCCR", 0xffec0004, 16, 2) { + PTC7_FN, PTC7_OUT, PTC7_IN, 0, + PTC6_FN, PTC6_OUT, PTC6_IN, 0, + PTC5_FN, PTC5_OUT, PTC5_IN, 0, + PTC4_FN, PTC4_OUT, PTC4_IN, 0, + PTC3_FN, PTC3_OUT, PTC3_IN, 0, + PTC2_FN, PTC2_OUT, PTC2_IN, 0, + PTC1_FN, PTC1_OUT, PTC1_IN, 0, + PTC0_FN, PTC0_OUT, PTC0_IN, 0 } + }, + { PINMUX_CFG_REG("PDCR", 0xffec0006, 16, 2) { + PTD7_FN, PTD7_OUT, PTD7_IN, 0, + PTD6_FN, PTD6_OUT, PTD6_IN, 0, + PTD5_FN, PTD5_OUT, PTD5_IN, 0, + PTD4_FN, PTD4_OUT, PTD4_IN, 0, + PTD3_FN, PTD3_OUT, PTD3_IN, 0, + PTD2_FN, PTD2_OUT, PTD2_IN, 0, + PTD1_FN, PTD1_OUT, PTD1_IN, 0, + PTD0_FN, PTD0_OUT, PTD0_IN, 0 } + }, + { PINMUX_CFG_REG("PECR", 0xffec0008, 16, 2) { + PTE7_FN, PTE7_OUT, PTE7_IN, 0, + PTE6_FN, PTE6_OUT, PTE6_IN, 0, + PTE5_FN, PTE5_OUT, PTE5_IN, 0, + PTE4_FN, PTE4_OUT, PTE4_IN, 0, + PTE3_FN, PTE3_OUT, PTE3_IN, 0, + PTE2_FN, PTE2_OUT, PTE2_IN, 0, + PTE1_FN, PTE1_OUT, PTE1_IN, 0, + PTE0_FN, PTE0_OUT, PTE0_IN, 0 } + }, + { PINMUX_CFG_REG("PFCR", 0xffec000a, 16, 2) { + PTF7_FN, PTF7_OUT, PTF7_IN, 0, + PTF6_FN, PTF6_OUT, PTF6_IN, 0, + PTF5_FN, PTF5_OUT, PTF5_IN, 0, + PTF4_FN, PTF4_OUT, PTF4_IN, 0, + PTF3_FN, PTF3_OUT, PTF3_IN, 0, + PTF2_FN, PTF2_OUT, PTF2_IN, 0, + PTF1_FN, PTF1_OUT, PTF1_IN, 0, + PTF0_FN, PTF0_OUT, PTF0_IN, 0 } + }, + { PINMUX_CFG_REG("PGCR", 0xffec000c, 16, 2) { + PTG7_FN, PTG7_OUT, PTG7_IN, 0, + PTG6_FN, PTG6_OUT, PTG6_IN, 0, + PTG5_FN, PTG5_OUT, PTG5_IN, 0, + PTG4_FN, PTG4_OUT, PTG4_IN, 0, + PTG3_FN, PTG3_OUT, PTG3_IN, 0, + PTG2_FN, PTG2_OUT, PTG2_IN, 0, + PTG1_FN, PTG1_OUT, PTG1_IN, 0, + PTG0_FN, PTG0_OUT, PTG0_IN, 0 } + }, + { PINMUX_CFG_REG("PHCR", 0xffec000e, 16, 2) { + PTH7_FN, PTH7_OUT, PTH7_IN, 0, + PTH6_FN, PTH6_OUT, PTH6_IN, 0, + PTH5_FN, PTH5_OUT, PTH5_IN, 0, + PTH4_FN, PTH4_OUT, PTH4_IN, 0, + PTH3_FN, PTH3_OUT, PTH3_IN, 0, + PTH2_FN, PTH2_OUT, PTH2_IN, 0, + PTH1_FN, PTH1_OUT, PTH1_IN, 0, + PTH0_FN, PTH0_OUT, PTH0_IN, 0 } + }, + { PINMUX_CFG_REG("PICR", 0xffec0010, 16, 2) { + PTI7_FN, PTI7_OUT, PTI7_IN, 0, + PTI6_FN, PTI6_OUT, PTI6_IN, 0, + PTI5_FN, PTI5_OUT, PTI5_IN, 0, + PTI4_FN, PTI4_OUT, PTI4_IN, 0, + PTI3_FN, PTI3_OUT, PTI3_IN, 0, + PTI2_FN, PTI2_OUT, PTI2_IN, 0, + PTI1_FN, PTI1_OUT, PTI1_IN, 0, + PTI0_FN, PTI0_OUT, PTI0_IN, 0 } + }, + { PINMUX_CFG_REG("PJCR", 0xffec0012, 16, 2) { + PTJ7_FN, PTJ7_OUT, PTJ7_IN, 0, + PTJ6_FN, PTJ6_OUT, PTJ6_IN, 0, + PTJ5_FN, PTJ5_OUT, PTJ5_IN, 0, + PTJ4_FN, PTJ4_OUT, PTJ4_IN, 0, + PTJ3_FN, PTJ3_OUT, PTJ3_IN, 0, + PTJ2_FN, PTJ2_OUT, PTJ2_IN, 0, + PTJ1_FN, PTJ1_OUT, PTJ1_IN, 0, 
+ PTJ0_FN, PTJ0_OUT, PTJ0_IN, 0 } + }, + { PINMUX_CFG_REG("PKCR", 0xffec0014, 16, 2) { + PTK7_FN, PTK7_OUT, PTK7_IN, 0, + PTK6_FN, PTK6_OUT, PTK6_IN, 0, + PTK5_FN, PTK5_OUT, PTK5_IN, 0, + PTK4_FN, PTK4_OUT, PTK4_IN, 0, + PTK3_FN, PTK3_OUT, PTK3_IN, 0, + PTK2_FN, PTK2_OUT, PTK2_IN, 0, + PTK1_FN, PTK1_OUT, PTK1_IN, 0, + PTK0_FN, PTK0_OUT, PTK0_IN, 0 } + }, + { PINMUX_CFG_REG("PLCR", 0xffec0016, 16, 2) { + PTL7_FN, PTL7_OUT, PTL7_IN, 0, + PTL6_FN, PTL6_OUT, PTL6_IN, 0, + PTL5_FN, PTL5_OUT, PTL5_IN, 0, + PTL4_FN, PTL4_OUT, PTL4_IN, 0, + PTL3_FN, PTL3_OUT, PTL3_IN, 0, + PTL2_FN, PTL2_OUT, PTL2_IN, 0, + PTL1_FN, PTL1_OUT, PTL1_IN, 0, + PTL0_FN, PTL0_OUT, PTL0_IN, 0 } + }, + { PINMUX_CFG_REG("PMCR", 0xffec0018, 16, 2) { + 0, 0, 0, 0, /* reserved: always set 1 */ + PTM6_FN, PTM6_OUT, PTM6_IN, 0, + PTM5_FN, PTM5_OUT, PTM5_IN, 0, + PTM4_FN, PTM4_OUT, PTM4_IN, 0, + PTM3_FN, PTM3_OUT, PTM3_IN, 0, + PTM2_FN, PTM2_OUT, PTM2_IN, 0, + PTM1_FN, PTM1_OUT, PTM1_IN, 0, + PTM0_FN, PTM0_OUT, PTM0_IN, 0 } + }, + { PINMUX_CFG_REG("PNCR", 0xffec001a, 16, 2) { + PTN7_FN, PTN7_OUT, PTN7_IN, 0, + PTN6_FN, PTN6_OUT, PTN6_IN, 0, + PTN5_FN, PTN5_OUT, PTN5_IN, 0, + PTN4_FN, PTN4_OUT, PTN4_IN, 0, + PTN3_FN, PTN3_OUT, PTN3_IN, 0, + PTN2_FN, PTN2_OUT, PTN2_IN, 0, + PTN1_FN, PTN1_OUT, PTN1_IN, 0, + PTN0_FN, PTN0_OUT, PTN0_IN, 0 } + }, + { PINMUX_CFG_REG("POCR", 0xffec001c, 16, 2) { + PTO7_FN, PTO7_OUT, PTO7_IN, 0, + PTO6_FN, PTO6_OUT, PTO6_IN, 0, + PTO5_FN, PTO5_OUT, PTO5_IN, 0, + PTO4_FN, PTO4_OUT, PTO4_IN, 0, + PTO3_FN, PTO3_OUT, PTO3_IN, 0, + PTO2_FN, PTO2_OUT, PTO2_IN, 0, + PTO1_FN, PTO1_OUT, PTO1_IN, 0, + PTO0_FN, PTO0_OUT, PTO0_IN, 0 } + }, + { PINMUX_CFG_REG("PPCR", 0xffec001e, 16, 2) { + 0, 0, 0, 0, /* reserved: always set 1 */ + PTP6_FN, PTP6_OUT, PTP6_IN, 0, + PTP5_FN, PTP5_OUT, PTP5_IN, 0, + PTP4_FN, PTP4_OUT, PTP4_IN, 0, + PTP3_FN, PTP3_OUT, PTP3_IN, 0, + PTP2_FN, PTP2_OUT, PTP2_IN, 0, + PTP1_FN, PTP1_OUT, PTP1_IN, 0, + PTP0_FN, PTP0_OUT, PTP0_IN, 0 } + }, + { PINMUX_CFG_REG("PQCR", 0xffec0020, 16, 2) { + 0, 0, 0, 0, /* reserved: always set 1 */ + PTQ6_FN, PTQ6_OUT, PTQ6_IN, 0, + PTQ5_FN, PTQ5_OUT, PTQ5_IN, 0, + PTQ4_FN, PTQ4_OUT, PTQ4_IN, 0, + PTQ3_FN, PTQ3_OUT, PTQ3_IN, 0, + PTQ2_FN, PTQ2_OUT, PTQ2_IN, 0, + PTQ1_FN, PTQ1_OUT, PTQ1_IN, 0, + PTQ0_FN, PTQ0_OUT, PTQ0_IN, 0 } + }, + { PINMUX_CFG_REG("PRCR", 0xffec0022, 16, 2) { + PTR7_FN, PTR7_OUT, PTR7_IN, 0, + PTR6_FN, PTR6_OUT, PTR6_IN, 0, + PTR5_FN, PTR5_OUT, PTR5_IN, 0, + PTR4_FN, PTR4_OUT, PTR4_IN, 0, + PTR3_FN, PTR3_OUT, PTR3_IN, 0, + PTR2_FN, PTR2_OUT, PTR2_IN, 0, + PTR1_FN, PTR1_OUT, PTR1_IN, 0, + PTR0_FN, PTR0_OUT, PTR0_IN, 0 } + }, + { PINMUX_CFG_REG("PSCR", 0xffec0024, 16, 2) { + PTS7_FN, PTS7_OUT, PTS7_IN, 0, + PTS6_FN, PTS6_OUT, PTS6_IN, 0, + PTS5_FN, PTS5_OUT, PTS5_IN, 0, + PTS4_FN, PTS4_OUT, PTS4_IN, 0, + PTS3_FN, PTS3_OUT, PTS3_IN, 0, + PTS2_FN, PTS2_OUT, PTS2_IN, 0, + PTS1_FN, PTS1_OUT, PTS1_IN, 0, + PTS0_FN, PTS0_OUT, PTS0_IN, 0 } + }, + { PINMUX_CFG_REG("PTCR", 0xffec0026, 16, 2) { + 0, 0, 0, 0, /* reserved: always set 1 */ + 0, 0, 0, 0, /* reserved: always set 1 */ + PTT5_FN, PTT5_OUT, PTT5_IN, 0, + PTT4_FN, PTT4_OUT, PTT4_IN, 0, + PTT3_FN, PTT3_OUT, PTT3_IN, 0, + PTT2_FN, PTT2_OUT, PTT2_IN, 0, + PTT1_FN, PTT1_OUT, PTT1_IN, 0, + PTT0_FN, PTT0_OUT, PTT0_IN, 0 } + }, + { PINMUX_CFG_REG("PUCR", 0xffec0028, 16, 2) { + PTU7_FN, PTU7_OUT, PTU7_IN, PTU7_IN_PU, + PTU6_FN, PTU6_OUT, PTU6_IN, PTU6_IN_PU, + PTU5_FN, PTU5_OUT, PTU5_IN, PTU5_IN_PU, + PTU4_FN, PTU4_OUT, PTU4_IN, PTU4_IN_PU, + PTU3_FN, PTU3_OUT, PTU3_IN, PTU3_IN_PU, + PTU2_FN, PTU2_OUT, 
PTU2_IN, PTU2_IN_PU, + PTU1_FN, PTU1_OUT, PTU1_IN, PTU1_IN_PU, + PTU0_FN, PTU0_OUT, PTU0_IN, PTU0_IN_PU } + }, + { PINMUX_CFG_REG("PVCR", 0xffec002a, 16, 2) { + PTV7_FN, PTV7_OUT, PTV7_IN, PTV7_IN_PU, + PTV6_FN, PTV6_OUT, PTV6_IN, PTV6_IN_PU, + PTV5_FN, PTV5_OUT, PTV5_IN, PTV5_IN_PU, + PTV4_FN, PTV4_OUT, PTV4_IN, PTV4_IN_PU, + PTV3_FN, PTV3_OUT, PTV3_IN, PTV3_IN_PU, + PTV2_FN, PTV2_OUT, PTV2_IN, PTV2_IN_PU, + PTV1_FN, PTV1_OUT, PTV1_IN, PTV1_IN_PU, + PTV0_FN, PTV0_OUT, PTV0_IN, PTV0_IN_PU } + }, + { PINMUX_CFG_REG("PWCR", 0xffec002c, 16, 2) { + PTW7_FN, PTW7_OUT, PTW7_IN, PTW7_IN_PU, + PTW6_FN, PTW6_OUT, PTW6_IN, PTW6_IN_PU, + PTW5_FN, PTW5_OUT, PTW5_IN, PTW5_IN_PU, + PTW4_FN, PTW4_OUT, PTW4_IN, PTW4_IN_PU, + PTW3_FN, PTW3_OUT, PTW3_IN, PTW3_IN_PU, + PTW2_FN, PTW2_OUT, PTW2_IN, PTW2_IN_PU, + PTW1_FN, PTW1_OUT, PTW1_IN, PTW1_IN_PU, + PTW0_FN, PTW0_OUT, PTW0_IN, PTW0_IN_PU } + }, + { PINMUX_CFG_REG("PXCR", 0xffec002e, 16, 2) { + PTX7_FN, PTX7_OUT, PTX7_IN, PTX7_IN_PU, + PTX6_FN, PTX6_OUT, PTX6_IN, PTX6_IN_PU, + PTX5_FN, PTX5_OUT, PTX5_IN, PTX5_IN_PU, + PTX4_FN, PTX4_OUT, PTX4_IN, PTX4_IN_PU, + PTX3_FN, PTX3_OUT, PTX3_IN, PTX3_IN_PU, + PTX2_FN, PTX2_OUT, PTX2_IN, PTX2_IN_PU, + PTX1_FN, PTX1_OUT, PTX1_IN, PTX1_IN_PU, + PTX0_FN, PTX0_OUT, PTX0_IN, PTX0_IN_PU } + }, + { PINMUX_CFG_REG("PYCR", 0xffec0030, 16, 2) { + PTY7_FN, PTY7_OUT, PTY7_IN, PTY7_IN_PU, + PTY6_FN, PTY6_OUT, PTY6_IN, PTY6_IN_PU, + PTY5_FN, PTY5_OUT, PTY5_IN, PTY5_IN_PU, + PTY4_FN, PTY4_OUT, PTY4_IN, PTY4_IN_PU, + PTY3_FN, PTY3_OUT, PTY3_IN, PTY3_IN_PU, + PTY2_FN, PTY2_OUT, PTY2_IN, PTY2_IN_PU, + PTY1_FN, PTY1_OUT, PTY1_IN, PTY1_IN_PU, + PTY0_FN, PTY0_OUT, PTY0_IN, PTY0_IN_PU } + }, + { PINMUX_CFG_REG("PZCR", 0xffec0032, 16, 2) { + 0, PTZ7_OUT, PTZ7_IN, 0, + 0, PTZ6_OUT, PTZ6_IN, 0, + 0, PTZ5_OUT, PTZ5_IN, 0, + 0, PTZ4_OUT, PTZ4_IN, 0, + 0, PTZ3_OUT, PTZ3_IN, 0, + 0, PTZ2_OUT, PTZ2_IN, 0, + 0, PTZ1_OUT, PTZ1_IN, 0, + 0, PTZ0_OUT, PTZ0_IN, 0 } + }, + + { PINMUX_CFG_REG("PSEL0", 0xffec0070, 16, 1) { + PS0_15_FN3, PS0_15_FN1, + PS0_14_FN3, PS0_14_FN1, + PS0_13_FN3, PS0_13_FN1, + PS0_12_FN3, PS0_12_FN1, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + PS0_7_FN2, PS0_7_FN1, + PS0_6_FN2, PS0_6_FN1, + PS0_5_FN2, PS0_5_FN1, + PS0_4_FN2, PS0_4_FN1, + PS0_3_FN2, PS0_3_FN1, + PS0_2_FN2, PS0_2_FN1, + PS0_1_FN2, PS0_1_FN1, + 0, 0, } + }, + { PINMUX_CFG_REG("PSEL1", 0xffec0072, 16, 1) { + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + PS1_7_FN1, PS1_7_FN3, + PS1_6_FN1, PS1_6_FN3, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, } + }, + { PINMUX_CFG_REG("PSEL2", 0xffec0074, 16, 1) { + 0, 0, + 0, 0, + PS2_13_FN3, PS2_13_FN1, + PS2_12_FN3, PS2_12_FN1, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + PS2_1_FN1, PS2_1_FN2, + PS2_0_FN1, PS2_0_FN2, } + }, + { PINMUX_CFG_REG("PSEL4", 0xffec0078, 16, 1) { + PS4_15_FN2, PS4_15_FN1, + PS4_14_FN2, PS4_14_FN1, + PS4_13_FN2, PS4_13_FN1, + PS4_12_FN2, PS4_12_FN1, + PS4_11_FN2, PS4_11_FN1, + PS4_10_FN2, PS4_10_FN1, + PS4_9_FN2, PS4_9_FN1, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + PS4_3_FN2, PS4_3_FN1, + PS4_2_FN2, PS4_2_FN1, + PS4_1_FN2, PS4_1_FN1, + PS4_0_FN2, PS4_0_FN1, } + }, + { PINMUX_CFG_REG("PSEL5", 0xffec007a, 16, 1) { + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + PS5_9_FN1, PS5_9_FN2, + PS5_8_FN1, PS5_8_FN2, + PS5_7_FN1, PS5_7_FN2, + PS5_6_FN1, PS5_6_FN2, + PS5_5_FN1, PS5_5_FN2, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, } + }, + { PINMUX_CFG_REG("PSEL6", 0xffec007c, 16, 1) { + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, + PS6_7_FN_AN, 
PS6_7_FN_EV, + PS6_6_FN_AN, PS6_6_FN_EV, + PS6_5_FN_AN, PS6_5_FN_EV, + PS6_4_FN_AN, PS6_4_FN_EV, + PS6_3_FN_AN, PS6_3_FN_EV, + PS6_2_FN_AN, PS6_2_FN_EV, + PS6_1_FN_AN, PS6_1_FN_EV, + PS6_0_FN_AN, PS6_0_FN_EV, } + }, + {} +}; + +static struct pinmux_data_reg pinmux_data_regs[] = { + { PINMUX_DATA_REG("PADR", 0xffec0034, 8) { + PTA7_DATA, PTA6_DATA, PTA5_DATA, PTA4_DATA, + PTA3_DATA, PTA2_DATA, PTA1_DATA, PTA0_DATA } + }, + { PINMUX_DATA_REG("PBDR", 0xffec0036, 8) { + PTB7_DATA, PTB6_DATA, PTB5_DATA, PTB4_DATA, + PTB3_DATA, PTB2_DATA, PTB1_DATA, PTB0_DATA } + }, + { PINMUX_DATA_REG("PCDR", 0xffec0038, 8) { + PTC7_DATA, PTC6_DATA, PTC5_DATA, PTC4_DATA, + PTC3_DATA, PTC2_DATA, PTC1_DATA, PTC0_DATA } + }, + { PINMUX_DATA_REG("PDDR", 0xffec003a, 8) { + PTD7_DATA, PTD6_DATA, PTD5_DATA, PTD4_DATA, + PTD3_DATA, PTD2_DATA, PTD1_DATA, PTD0_DATA } + }, + { PINMUX_DATA_REG("PEDR", 0xffec003c, 8) { + PTE7_DATA, PTE6_DATA, PTE5_DATA, PTE4_DATA, + PTE3_DATA, PTE2_DATA, PTE1_DATA, PTE0_DATA } + }, + { PINMUX_DATA_REG("PFDR", 0xffec003e, 8) { + PTF7_DATA, PTF6_DATA, PTF5_DATA, PTF4_DATA, + PTF3_DATA, PTF2_DATA, PTF1_DATA, PTF0_DATA } + }, + { PINMUX_DATA_REG("PGDR", 0xffec0040, 8) { + PTG7_DATA, PTG6_DATA, PTG5_DATA, PTG4_DATA, + PTG3_DATA, PTG2_DATA, PTG1_DATA, PTG0_DATA } + }, + { PINMUX_DATA_REG("PHDR", 0xffec0042, 8) { + PTH7_DATA, PTH6_DATA, PTH5_DATA, PTH4_DATA, + PTH3_DATA, PTH2_DATA, PTH1_DATA, PTH0_DATA } + }, + { PINMUX_DATA_REG("PIDR", 0xffec0044, 8) { + PTI7_DATA, PTI6_DATA, PTI5_DATA, PTI4_DATA, + PTI3_DATA, PTI2_DATA, PTI1_DATA, PTI0_DATA } + }, + { PINMUX_DATA_REG("PJDR", 0xffec0046, 8) { + PTJ7_DATA, PTJ6_DATA, PTJ5_DATA, PTJ4_DATA, + PTJ3_DATA, PTJ2_DATA, PTJ1_DATA, PTJ0_DATA } + }, + { PINMUX_DATA_REG("PKDR", 0xffec0048, 8) { + PTK7_DATA, PTK6_DATA, PTK5_DATA, PTK4_DATA, + PTK3_DATA, PTK2_DATA, PTK1_DATA, PTK0_DATA } + }, + { PINMUX_DATA_REG("PLDR", 0xffec004a, 8) { + PTL7_DATA, PTL6_DATA, PTL5_DATA, PTL4_DATA, + PTL3_DATA, PTL2_DATA, PTL1_DATA, PTL0_DATA } + }, + { PINMUX_DATA_REG("PMDR", 0xffec004c, 8) { + 0, PTM6_DATA, PTM5_DATA, PTM4_DATA, + PTM3_DATA, PTM2_DATA, PTM1_DATA, PTM0_DATA } + }, + { PINMUX_DATA_REG("PNDR", 0xffec004e, 8) { + PTN7_DATA, PTN6_DATA, PTN5_DATA, PTN4_DATA, + PTN3_DATA, PTN2_DATA, PTN1_DATA, PTN0_DATA } + }, + { PINMUX_DATA_REG("PODR", 0xffec0050, 8) { + PTO7_DATA, PTO6_DATA, PTO5_DATA, PTO4_DATA, + PTO3_DATA, PTO2_DATA, PTO1_DATA, PTO0_DATA } + }, + { PINMUX_DATA_REG("PPDR", 0xffec0052, 8) { + 0, PTP6_DATA, PTP5_DATA, PTP4_DATA, + PTP3_DATA, PTP2_DATA, PTP1_DATA, PTP0_DATA } + }, + { PINMUX_DATA_REG("PQDR", 0xffec0054, 8) { + 0, PTQ6_DATA, PTQ5_DATA, PTQ4_DATA, + PTQ3_DATA, PTQ2_DATA, PTQ1_DATA, PTQ0_DATA } + }, + { PINMUX_DATA_REG("PRDR", 0xffec0056, 8) { + PTR7_DATA, PTR6_DATA, PTR5_DATA, PTR4_DATA, + PTR3_DATA, PTR2_DATA, PTR1_DATA, PTR0_DATA } + }, + { PINMUX_DATA_REG("PSDR", 0xffec0058, 8) { + PTS7_DATA, PTS6_DATA, PTS5_DATA, PTS4_DATA, + PTS3_DATA, PTS2_DATA, PTS1_DATA, PTS0_DATA } + }, + { PINMUX_DATA_REG("PTDR", 0xffec005a, 8) { + 0, 0, PTT5_DATA, PTT4_DATA, + PTT3_DATA, PTT2_DATA, PTT1_DATA, PTT0_DATA } + }, + { PINMUX_DATA_REG("PUDR", 0xffec005c, 8) { + PTU7_DATA, PTU6_DATA, PTU5_DATA, PTU4_DATA, + PTU3_DATA, PTU2_DATA, PTU1_DATA, PTU0_DATA } + }, + { PINMUX_DATA_REG("PVDR", 0xffec005e, 8) { + PTV7_DATA, PTV6_DATA, PTV5_DATA, PTV4_DATA, + PTV3_DATA, PTV2_DATA, PTV1_DATA, PTV0_DATA } + }, + { PINMUX_DATA_REG("PWDR", 0xffec0060, 8) { + PTW7_DATA, PTW6_DATA, PTW5_DATA, PTW4_DATA, + PTW3_DATA, PTW2_DATA, PTW1_DATA, PTW0_DATA } + }, + { 
PINMUX_DATA_REG("PXDR", 0xffec0062, 8) { + PTX7_DATA, PTX6_DATA, PTX5_DATA, PTX4_DATA, + PTX3_DATA, PTX2_DATA, PTX1_DATA, PTX0_DATA } + }, + { PINMUX_DATA_REG("PYDR", 0xffec0064, 8) { + PTY7_DATA, PTY6_DATA, PTY5_DATA, PTY4_DATA, + PTY3_DATA, PTY2_DATA, PTY1_DATA, PTY0_DATA } + }, + { PINMUX_DATA_REG("PZDR", 0xffec0066, 8) { + PTZ7_DATA, PTZ6_DATA, PTZ5_DATA, PTZ4_DATA, + PTZ3_DATA, PTZ2_DATA, PTZ1_DATA, PTZ0_DATA } + }, + { }, +}; + +static struct pinmux_info sh7757_pinmux_info = { + .name = "sh7757_pfc", + .reserved_id = PINMUX_RESERVED, + .data = { PINMUX_DATA_BEGIN, PINMUX_DATA_END }, + .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, + .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END }, + .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, + .mark = { PINMUX_MARK_BEGIN, PINMUX_MARK_END }, + .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, + + .first_gpio = GPIO_PTA7, + .last_gpio = GPIO_FN_D0, + + .gpios = pinmux_gpios, + .cfg_regs = pinmux_config_regs, + .data_regs = pinmux_data_regs, + + .gpio_data = pinmux_data, + .gpio_data_size = ARRAY_SIZE(pinmux_data), +}; + +static int __init plat_pinmux_setup(void) +{ + return register_pinmux(&sh7757_pinmux_info); +} + +arch_initcall(plat_pinmux_setup); diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c new file mode 100644 index 000000000000..c470e15f2e03 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c @@ -0,0 +1,513 @@ +/* + * SH7757 Setup + * + * Copyright (C) 2009 Renesas Solutions Corp. + * + * based on setup-sh7785.c : Copyright (C) 2007 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +static struct sh_timer_config tmu0_platform_data = { + .name = "TMU0", + .channel_offset = 0x04, + .timer_bit = 0, + .clk = "peripheral_clk", + .clockevent_rating = 200, +}; + +static struct resource tmu0_resources[] = { + [0] = { + .name = "TMU0", + .start = 0xfe430008, + .end = 0xfe430013, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 28, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device tmu0_device = { + .name = "sh_tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct sh_timer_config tmu1_platform_data = { + .name = "TMU1", + .channel_offset = 0x10, + .timer_bit = 1, + .clk = "peripheral_clk", + .clocksource_rating = 200, +}; + +static struct resource tmu1_resources[] = { + [0] = { + .name = "TMU1", + .start = 0xfe430014, + .end = 0xfe43001f, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 29, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device tmu1_device = { + .name = "sh_tmu", + .id = 1, + .dev = { + .platform_data = &tmu1_platform_data, + }, + .resource = tmu1_resources, + .num_resources = ARRAY_SIZE(tmu1_resources), +}; + +static struct plat_sci_port sci_platform_data[] = { + { + .mapbase = 0xfe4b0000, /* SCIF2 */ + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 40, 40, 40, 40 }, + }, { + .mapbase = 0xfe4c0000, /* SCIF3 */ + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 76, 76, 76, 76 }, + }, { + .mapbase = 0xfe4d0000, /* SCIF4 */ + .flags = UPF_BOOT_AUTOCONF, + .type = PORT_SCIF, + .irqs = { 104, 104, 104, 104 }, + }, { + .flags = 0, + } +}; + +static struct platform_device sci_device = { + .name = "sh-sci", + .id = -1, + .dev = { + .platform_data = sci_platform_data, + }, +}; + +static struct platform_device *sh7757_devices[] __initdata = { + &tmu0_device, + &tmu1_device, + &sci_device, +}; + +static int __init sh7757_devices_setup(void) +{ + return platform_add_devices(sh7757_devices, + ARRAY_SIZE(sh7757_devices)); +} +arch_initcall(sh7757_devices_setup); + +enum { + UNUSED = 0, + + /* interrupt sources */ + + IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH, + IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH, + IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH, + IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, + + IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH, + IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH, + IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH, + IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + + SDHI, + DVC, + IRQ8, IRQ9, IRQ10, + WDT0, + TMU0, TMU1, TMU2, TMU2_TICPI, + HUDI, + + ARC4, + DMAC0, + IRQ11, + SCIF2, + DMAC1_6, + USB0, + IRQ12, + JMC, + SPI1, + IRQ13, IRQ14, + USB1, + TMR01, TMR23, TMR45, + WDT1, + FRT, + LPC, + SCIF0, SCIF1, SCIF3, + PECI0I, PECI1I, PECI2I, + IRQ15, + ETHERC, + SPI0, + ADC1, + DMAC1_8, + SIM, + TMU3, TMU4, TMU5, + ADC0, + SCIF4, + IIC0_0, IIC0_1, IIC0_2, IIC0_3, + IIC1_0, IIC1_1, IIC1_2, IIC1_3, + IIC2_0, IIC2_1, IIC2_2, IIC2_3, + IIC3_0, IIC3_1, IIC3_2, IIC3_3, + IIC4_0, IIC4_1, IIC4_2, IIC4_3, + IIC5_0, IIC5_1, IIC5_2, IIC5_3, + IIC6_0, IIC6_1, IIC6_2, IIC6_3, + IIC7_0, IIC7_1, IIC7_2, IIC7_3, + IIC8_0, IIC8_1, IIC8_2, IIC8_3, + IIC9_0, IIC9_1, IIC9_2, IIC9_3, + PCIINTA, + PCIE, + SGPIO, + + /* interrupt groups */ + + TMU012, TMU345, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(SDHI, 0x480), INTC_VECT(SDHI, 0x04a0), + INTC_VECT(SDHI, 0x4c0), + 
INTC_VECT(DVC, 0x4e0), + INTC_VECT(IRQ8, 0x500), INTC_VECT(IRQ9, 0x520), + INTC_VECT(IRQ10, 0x540), + INTC_VECT(WDT0, 0x560), + INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0), + INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0), + INTC_VECT(HUDI, 0x600), + INTC_VECT(ARC4, 0x620), + INTC_VECT(DMAC0, 0x640), INTC_VECT(DMAC0, 0x660), + INTC_VECT(DMAC0, 0x680), INTC_VECT(DMAC0, 0x6a0), + INTC_VECT(DMAC0, 0x6c0), + INTC_VECT(IRQ11, 0x6e0), + INTC_VECT(SCIF2, 0x700), INTC_VECT(SCIF2, 0x720), + INTC_VECT(SCIF2, 0x740), INTC_VECT(SCIF2, 0x760), + INTC_VECT(DMAC0, 0x780), INTC_VECT(DMAC0, 0x7a0), + INTC_VECT(DMAC1_6, 0x7c0), INTC_VECT(DMAC1_6, 0x7e0), + INTC_VECT(USB0, 0x840), + INTC_VECT(IRQ12, 0x880), + INTC_VECT(JMC, 0x8a0), + INTC_VECT(SPI1, 0x8c0), + INTC_VECT(IRQ13, 0x8e0), INTC_VECT(IRQ14, 0x900), + INTC_VECT(USB1, 0x920), + INTC_VECT(TMR01, 0xa00), INTC_VECT(TMR23, 0xa20), + INTC_VECT(TMR45, 0xa40), + INTC_VECT(WDT1, 0xa60), + INTC_VECT(FRT, 0xa80), + INTC_VECT(LPC, 0xaa0), INTC_VECT(LPC, 0xac0), + INTC_VECT(LPC, 0xae0), INTC_VECT(LPC, 0xb00), + INTC_VECT(LPC, 0xb20), + INTC_VECT(SCIF0, 0xb40), INTC_VECT(SCIF1, 0xb60), + INTC_VECT(SCIF3, 0xb80), INTC_VECT(SCIF3, 0xba0), + INTC_VECT(SCIF3, 0xbc0), INTC_VECT(SCIF3, 0xbe0), + INTC_VECT(PECI0I, 0xc00), INTC_VECT(PECI1I, 0xc20), + INTC_VECT(PECI2I, 0xc40), + INTC_VECT(IRQ15, 0xc60), + INTC_VECT(ETHERC, 0xc80), INTC_VECT(ETHERC, 0xca0), + INTC_VECT(SPI0, 0xcc0), + INTC_VECT(ADC1, 0xce0), + INTC_VECT(DMAC1_8, 0xd00), INTC_VECT(DMAC1_8, 0xd20), + INTC_VECT(DMAC1_8, 0xd40), INTC_VECT(DMAC1_8, 0xd60), + INTC_VECT(SIM, 0xd80), INTC_VECT(SIM, 0xda0), + INTC_VECT(SIM, 0xdc0), INTC_VECT(SIM, 0xde0), + INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20), + INTC_VECT(TMU5, 0xe40), + INTC_VECT(ADC0, 0xe60), + INTC_VECT(SCIF4, 0xf00), INTC_VECT(SCIF4, 0xf20), + INTC_VECT(SCIF4, 0xf40), INTC_VECT(SCIF4, 0xf60), + INTC_VECT(IIC0_0, 0x1400), INTC_VECT(IIC0_1, 0x1420), + INTC_VECT(IIC0_2, 0x1440), INTC_VECT(IIC0_3, 0x1460), + INTC_VECT(IIC1_0, 0x1480), INTC_VECT(IIC1_1, 0x14e0), + INTC_VECT(IIC1_2, 0x1500), INTC_VECT(IIC1_3, 0x1520), + INTC_VECT(IIC2_0, 0x1540), INTC_VECT(IIC2_1, 0x1560), + INTC_VECT(IIC2_2, 0x1580), INTC_VECT(IIC2_3, 0x1600), + INTC_VECT(IIC3_0, 0x1620), INTC_VECT(IIC3_1, 0x1640), + INTC_VECT(IIC3_2, 0x16e0), INTC_VECT(IIC3_3, 0x1700), + INTC_VECT(IIC4_0, 0x17c0), INTC_VECT(IIC4_1, 0x1800), + INTC_VECT(IIC4_2, 0x1820), INTC_VECT(IIC4_3, 0x1840), + INTC_VECT(IIC5_0, 0x1860), INTC_VECT(IIC5_1, 0x1880), + INTC_VECT(IIC5_2, 0x18a0), INTC_VECT(IIC5_3, 0x18c0), + INTC_VECT(IIC6_0, 0x18e0), INTC_VECT(IIC6_1, 0x1900), + INTC_VECT(IIC6_2, 0x1920), INTC_VECT(IIC6_3, 0x1980), + INTC_VECT(IIC7_0, 0x19a0), INTC_VECT(IIC7_1, 0x1a00), + INTC_VECT(IIC7_2, 0x1a20), INTC_VECT(IIC7_3, 0x1a40), + INTC_VECT(IIC8_0, 0x1a60), INTC_VECT(IIC8_1, 0x1a80), + INTC_VECT(IIC8_2, 0x1aa0), INTC_VECT(IIC8_3, 0x1b40), + INTC_VECT(IIC9_0, 0x1b60), INTC_VECT(IIC9_1, 0x1b80), + INTC_VECT(IIC9_2, 0x1c00), INTC_VECT(IIC9_3, 0x1c20), + INTC_VECT(PCIINTA, 0x1ce0), + INTC_VECT(PCIE, 0x1e00), + INTC_VECT(SGPIO, 0x1f80), + INTC_VECT(SGPIO, 0x1fa0), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI), + INTC_GROUP(TMU345, TMU3, TMU4, TMU5), +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, + + { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */ + { IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH, + 
IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH, + IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH, + IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, 0, + IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH, + IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH, + IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH, + IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } }, + + { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */ + { 0, 0, 0, 0, 0, 0, 0, 0, + 0, DMAC1_8, 0, PECI0I, LPC, FRT, WDT1, TMR45, + TMR23, TMR01, 0, 0, 0, 0, 0, DMAC0, + HUDI, 0, WDT0, SCIF3, SCIF2, SDHI, TMU345, TMU012 + } }, + + { 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */ + { IRQ15, IRQ14, IRQ13, IRQ12, IRQ11, IRQ10, SCIF4, ETHERC, + IRQ9, IRQ8, SCIF1, SCIF0, USB0, 0, 0, USB1, + ADC1, 0, DMAC1_6, ADC0, SPI0, SIM, PECI2I, PECI1I, + ARC4, 0, SPI1, JMC, 0, 0, 0, DVC + } }, + + { 0xffd10038, 0xffd1003c, 32, /* INT2MSKR2 / INT2MSKCR2 */ + { IIC4_1, IIC4_2, IIC5_0, 0, 0, 0, SGPIO, 0, + 0, 0, 0, IIC9_2, IIC8_2, IIC8_1, IIC8_0, IIC7_3, + IIC7_2, IIC7_1, IIC6_3, IIC0_0, IIC0_1, IIC0_2, IIC0_3, IIC3_1, + IIC2_3, 0, IIC2_1, IIC9_1, IIC3_3, IIC1_0, PCIE, IIC2_2 + } }, + + { 0xffd100d0, 0xff1400d4, 32, /* INT2MSKR3 / INT2MSKCR4 */ + { 0, IIC6_1, IIC6_0, IIC5_1, IIC3_2, IIC2_0, 0, 0, + IIC1_3, IIC1_2, IIC9_0, IIC8_3, IIC4_3, IIC7_0, 0, IIC6_2, + PCIINTA, 0, IIC4_0, 0, 0, 0, 0, IIC9_3, + IIC3_0, 0, IIC5_3, IIC5_2, 0, 0, 0, IIC1_1 + } }, +}; + +#define INTPRI 0xffd00010 +#define INT2PRI0 0xffd40000 +#define INT2PRI1 0xffd40004 +#define INT2PRI2 0xffd40008 +#define INT2PRI3 0xffd4000c +#define INT2PRI4 0xffd40010 +#define INT2PRI5 0xffd40014 +#define INT2PRI6 0xffd40018 +#define INT2PRI7 0xffd4001c +#define INT2PRI8 0xffd400a0 +#define INT2PRI9 0xffd400a4 +#define INT2PRI10 0xffd400a8 +#define INT2PRI11 0xffd400ac +#define INT2PRI12 0xffd400b0 +#define INT2PRI13 0xffd400b4 +#define INT2PRI14 0xffd400b8 +#define INT2PRI15 0xffd400bc +#define INT2PRI16 0xffd10000 +#define INT2PRI17 0xffd10004 +#define INT2PRI18 0xffd10008 +#define INT2PRI19 0xffd1000c +#define INT2PRI20 0xffd10010 +#define INT2PRI21 0xffd10014 +#define INT2PRI22 0xffd10018 +#define INT2PRI23 0xffd1001c +#define INT2PRI24 0xffd100a0 +#define INT2PRI25 0xffd100a4 +#define INT2PRI26 0xffd100a8 +#define INT2PRI27 0xffd100ac +#define INT2PRI28 0xffd100b0 +#define INT2PRI29 0xffd100b4 +#define INT2PRI30 0xffd100b8 +#define INT2PRI31 0xffd100bc + +static struct intc_prio_reg prio_registers[] __initdata = { + { INTPRI, 0, 32, 4, { IRQ0, IRQ1, IRQ2, IRQ3, + IRQ4, IRQ5, IRQ6, IRQ7 } }, + + { INT2PRI0, 0, 32, 8, { TMU0, TMU1, TMU2, TMU2_TICPI } }, + { INT2PRI1, 0, 32, 8, { TMU3, TMU4, TMU5, SDHI } }, + { INT2PRI2, 0, 32, 8, { SCIF2, SCIF3, WDT0, IRQ8 } }, + { INT2PRI3, 0, 32, 8, { HUDI, DMAC0, ADC0, IRQ9 } }, + { INT2PRI4, 0, 32, 8, { IRQ10, 0, TMR01, TMR23 } }, + { INT2PRI5, 0, 32, 8, { TMR45, WDT1, FRT, LPC } }, + { INT2PRI6, 0, 32, 8, { PECI0I, ETHERC, DMAC1_8, 0 } }, + { INT2PRI7, 0, 32, 8, { SCIF4, 0, IRQ11, IRQ12 } }, + { INT2PRI8, 0, 32, 8, { 0, 0, 0, DVC } }, + { INT2PRI9, 0, 32, 8, { ARC4, 0, SPI1, JMC } }, + { INT2PRI10, 0, 32, 8, { SPI0, SIM, PECI2I, PECI1I } }, + { INT2PRI11, 0, 32, 8, { ADC1, IRQ13, DMAC1_6, IRQ14 } }, + { INT2PRI12, 0, 32, 8, { USB0, 0, IRQ15, USB1 } }, + { INT2PRI13, 0, 32, 8, { 0, 0, SCIF1, SCIF0 } }, + + { INT2PRI16, 0, 32, 8, { IIC2_2, 0, 0, 0 } }, + { INT2PRI17, 0, 32, 8, { PCIE, 0, 0, IIC1_0 } }, + { INT2PRI18, 0, 32, 8, { IIC3_3, IIC9_1, IIC2_1, IIC1_2 } }, + { INT2PRI19, 0, 32, 8, { IIC2_3, IIC3_1, 0, IIC1_3 } }, + { INT2PRI20, 0, 32, 8, { IIC2_0, IIC6_3, IIC7_1, IIC7_2 } }, + { 
INT2PRI21, 0, 32, 8, { IIC7_3, IIC8_0, IIC8_1, IIC8_2 } }, + { INT2PRI22, 0, 32, 8, { IIC9_2, 0, 0, 0 } }, + { INT2PRI23, 0, 32, 8, { 0, SGPIO, IIC3_2, IIC5_1 } }, + { INT2PRI24, 0, 32, 8, { 0, 0, 0, IIC1_1 } }, + { INT2PRI25, 0, 32, 8, { IIC3_0, 0, IIC5_3, IIC5_2 } }, + { INT2PRI26, 0, 32, 8, { 0, 0, 0, IIC9_3 } }, + { INT2PRI27, 0, 32, 8, { PCIINTA, IIC6_0, IIC4_0, IIC6_1 } }, + { INT2PRI28, 0, 32, 8, { IIC4_3, IIC7_0, 0, IIC6_2 } }, + { INT2PRI29, 0, 32, 8, { 0, 0, IIC9_0, IIC8_3 } }, + { INT2PRI30, 0, 32, 8, { IIC4_1, IIC4_2, IIC5_0, 0 } }, + { INT2PRI31, 0, 32, 8, { IIC0_0, IIC0_1, IIC0_2, IIC0_3 } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7757", vectors, groups, + mask_registers, prio_registers, NULL); + +/* Support for external interrupt pins in IRQ mode */ +static struct intc_vect vectors_irq0123[] __initdata = { + INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280), + INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300), +}; + +static struct intc_vect vectors_irq4567[] __initdata = { + INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380), + INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200), +}; + +static struct intc_sense_reg sense_registers[] __initdata = { + { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3, + IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_mask_reg ack_registers[] __initdata = { + { 0xffd00024, 0, 32, /* INTREQ */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7757-irq0123", + vectors_irq0123, NULL, mask_registers, + prio_registers, sense_registers, ack_registers); + +static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7757-irq4567", + vectors_irq4567, NULL, mask_registers, + prio_registers, sense_registers, ack_registers); + +/* External interrupt pins in IRL mode */ +static struct intc_vect vectors_irl0123[] __initdata = { + INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220), + INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260), + INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0), + INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0), + INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320), + INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360), + INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0), + INTC_VECT(IRL0_HHHL, 0x3c0), +}; + +static struct intc_vect vectors_irl4567[] __initdata = { + INTC_VECT(IRL4_LLLL, 0xb00), INTC_VECT(IRL4_LLLH, 0xb20), + INTC_VECT(IRL4_LLHL, 0xb40), INTC_VECT(IRL4_LLHH, 0xb60), + INTC_VECT(IRL4_LHLL, 0xb80), INTC_VECT(IRL4_LHLH, 0xba0), + INTC_VECT(IRL4_LHHL, 0xbc0), INTC_VECT(IRL4_LHHH, 0xbe0), + INTC_VECT(IRL4_HLLL, 0xc00), INTC_VECT(IRL4_HLLH, 0xc20), + INTC_VECT(IRL4_HLHL, 0xc40), INTC_VECT(IRL4_HLHH, 0xc60), + INTC_VECT(IRL4_HHLL, 0xc80), INTC_VECT(IRL4_HHLH, 0xca0), + INTC_VECT(IRL4_HHHL, 0xcc0), +}; + +static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7757-irl0123", vectors_irl0123, + NULL, mask_registers, NULL, NULL); + +static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7757-irl4567", vectors_irl4567, + NULL, mask_registers, NULL, NULL); + +#define INTC_ICR0 0xffd00000 +#define INTC_INTMSK0 0xffd00044 +#define INTC_INTMSK1 0xffd00048 +#define INTC_INTMSK2 0xffd40080 +#define INTC_INTMSKCLR1 0xffd00068 +#define INTC_INTMSKCLR2 0xffd40084 + +void __init plat_irq_setup(void) +{ + /* disable IRQ3-0 + IRQ7-4 */ + ctrl_outl(0xff000000, INTC_INTMSK0); + + /* disable IRL3-0 + IRL7-4 */ + ctrl_outl(0xc0000000, INTC_INTMSK1); + ctrl_outl(0xfffefffe, INTC_INTMSK2); + + /* select IRL mode for IRL3-0 + IRL7-4 */ + 
ctrl_outl(ctrl_inl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); + + /* disable holding function, ie enable "SH-4 Mode" */ + ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00200000, INTC_ICR0); + + register_intc_controller(&intc_desc); +} + +void __init plat_irq_setup_pins(int mode) +{ + switch (mode) { + case IRQ_MODE_IRQ7654: + /* select IRQ mode for IRL7-4 */ + ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00400000, INTC_ICR0); + register_intc_controller(&intc_desc_irq4567); + break; + case IRQ_MODE_IRQ3210: + /* select IRQ mode for IRL3-0 */ + ctrl_outl(ctrl_inl(INTC_ICR0) | 0x00800000, INTC_ICR0); + register_intc_controller(&intc_desc_irq0123); + break; + case IRQ_MODE_IRL7654: + /* enable IRL7-4 but don't provide any masking */ + ctrl_outl(0x40000000, INTC_INTMSKCLR1); + ctrl_outl(0x0000fffe, INTC_INTMSKCLR2); + break; + case IRQ_MODE_IRL3210: + /* enable IRL0-3 but don't provide any masking */ + ctrl_outl(0x80000000, INTC_INTMSKCLR1); + ctrl_outl(0xfffe0000, INTC_INTMSKCLR2); + break; + case IRQ_MODE_IRL7654_MASK: + /* enable IRL7-4 and mask using cpu intc controller */ + ctrl_outl(0x40000000, INTC_INTMSKCLR1); + register_intc_controller(&intc_desc_irl4567); + break; + case IRQ_MODE_IRL3210_MASK: + /* enable IRL0-3 and mask using cpu intc controller */ + ctrl_outl(0x80000000, INTC_INTMSKCLR1); + register_intc_controller(&intc_desc_irl0123); + break; + default: + BUG(); + } +} + +void __init plat_mem_setup(void) +{ +} diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index ceb409bf7741..212e6bddaeb8 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -481,7 +481,7 @@ static const char *cpu_name[] = { [CPU_SH7763] = "SH7763", [CPU_SH7770] = "SH7770", [CPU_SH7780] = "SH7780", [CPU_SH7781] = "SH7781", [CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785", - [CPU_SH7786] = "SH7786", + [CPU_SH7786] = "SH7786", [CPU_SH7757] = "SH7757", [CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3", [CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103", [CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723", diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 2795618e4f07..64dc1ad59801 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -82,7 +82,7 @@ config 32BIT config PMB_ENABLE bool "Support 32-bit physical addressing through PMB" - depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) + depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) select 32BIT default y help @@ -97,7 +97,7 @@ choice config PMB bool "PMB" - depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) + depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) select 32BIT help If you say Y here, physical addressing will be extended to @@ -106,7 +106,8 @@ config PMB config PMB_FIXED bool "fixed PMB" - depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7780 || \ + depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || \ + CPU_SUBTYPE_SH7780 || \ CPU_SUBTYPE_SH7785) select 32BIT help diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 4cbb87ad070a..32dc2fc50e6b 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -272,7 +272,8 @@ static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) __raw_writew(data, PSCR); } } -#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ +#elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \ + defined(CONFIG_CPU_SUBTYPE_SH7763) || \ defined(CONFIG_CPU_SUBTYPE_SH7780) || \ defined(CONFIG_CPU_SUBTYPE_SH7785) || \ defined(CONFIG_CPU_SUBTYPE_SH7786) 
|| \ diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h index 38072c15b845..3e2fcf93b42e 100644 --- a/drivers/serial/sh-sci.h +++ b/drivers/serial/sh-sci.h @@ -112,6 +112,13 @@ #elif defined(CONFIG_H8S2678) # define SCSCR_INIT(port) 0x30 /* TIE=0,RIE=0,TE=1,RE=1 */ # define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port) +#elif defined(CONFIG_CPU_SUBTYPE_SH7757) +# define SCSPTR0 0xfe4b0020 +# define SCSPTR1 0xfe4b0020 +# define SCSPTR2 0xfe4b0020 +# define SCIF_ORER 0x0001 +# define SCSCR_INIT(port) 0x38 +# define SCIF_ONLY #elif defined(CONFIG_CPU_SUBTYPE_SH7763) # define SCSPTR0 0xffe00024 /* 16 bit SCIF */ # define SCSPTR1 0xffe08024 /* 16 bit SCIF */ @@ -562,6 +569,16 @@ static inline int sci_rxd_in(struct uart_port *port) return ctrl_inw(SCSPTR2)&0x0001 ? 1 : 0; /* SCIF */ return 1; } +#elif defined(CONFIG_CPU_SUBTYPE_SH7757) +static inline int sci_rxd_in(struct uart_port *port) +{ + if (port->mapbase == 0xfe4b0000) + return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; + if (port->mapbase == 0xfe4c0000) + return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; + if (port->mapbase == 0xfe4d0000) + return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; +} #elif defined(CONFIG_CPU_SUBTYPE_SH7760) static inline int sci_rxd_in(struct uart_port *port) { -- cgit v1.2.3-59-g8ed1b From fb3f3e7fc6d4afb32f9eba32124beaf40313de3c Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sun, 16 Aug 2009 15:44:08 +0100 Subject: sh: unwinder: Fix memory leak and create our own kmem cache Plug a memory leak in dwarf_unwinder_dump() where we didn't free the memory that we had previously allocated for the DWARF frames and DWARF registers. Now is also a opportune time to implement our own mempool and kmem cache. It's a good idea to have a certain number of frame and register objects in reserve at all times, so that we are guaranteed to have our allocation satisfied even when memory is scarce. Since we have pools to allocate from we can implement the registers for each frame as a linked list as opposed to a sparsely populated array. Whilst it's true that the lookup time for a linked list is larger than for arrays, there's only usually a maximum of 8 registers per frame. So the overhead isn't that much of a concern. Signed-off-by: Matt Fleming --- arch/sh/include/asm/dwarf.h | 22 ++--- arch/sh/kernel/dwarf.c | 201 ++++++++++++++++++++++++++++++-------------- 2 files changed, 142 insertions(+), 81 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h index 2fbe8720411e..a22fbe98303f 100644 --- a/arch/sh/include/asm/dwarf.h +++ b/arch/sh/include/asm/dwarf.h @@ -265,10 +265,7 @@ struct dwarf_frame { unsigned long pc; - struct dwarf_reg *regs; - unsigned int num_regs; /* how many regs are allocated? */ - - unsigned int depth; /* what level are we in the callstack? */ + struct list_head reg_list; unsigned long cfa; @@ -292,22 +289,15 @@ struct dwarf_frame { * @flags: Describes how to calculate the value of this register */ struct dwarf_reg { + struct list_head link; + + unsigned int number; + unsigned long addr; unsigned long flags; #define DWARF_REG_OFFSET (1 << 0) }; -/** - * dwarf_stack - a DWARF stack contains a collection of DWARF frames - * @depth: the number of frames in the stack - * @level: an array of DWARF frames, indexed by stack level - * - */ -struct dwarf_stack { - unsigned int depth; - struct dwarf_frame **level; -}; - /* * Call Frame instruction opcodes. 
*/ @@ -372,7 +362,7 @@ static inline unsigned int DW_CFA_operand(unsigned long insn) extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, struct dwarf_frame *); -#endif /* __ASSEMBLY__ */ +#endif /* !__ASSEMBLY__ */ #define CFI_STARTPROC .cfi_startproc #define CFI_ENDPROC .cfi_endproc diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index d0652153f576..e4810375207d 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -25,6 +26,17 @@ #include #include +/* Reserve enough memory for two stack frames */ +#define DWARF_FRAME_MIN_REQ 2 +/* ... with 4 registers per frame. */ +#define DWARF_REG_MIN_REQ (DWARF_FRAME_MIN_REQ * 4) + +static struct kmem_cache *dwarf_frame_cachep; +static mempool_t *dwarf_frame_pool; + +static struct kmem_cache *dwarf_reg_cachep; +static mempool_t *dwarf_reg_pool; + static LIST_HEAD(dwarf_cie_list); static DEFINE_SPINLOCK(dwarf_cie_lock); @@ -33,33 +45,25 @@ static DEFINE_SPINLOCK(dwarf_fde_lock); static struct dwarf_cie *cached_cie; -/* - * Figure out whether we need to allocate some dwarf registers. If dwarf - * registers have already been allocated then we may need to realloc - * them. "reg" is a register number that we need to be able to access - * after this call. +/** + * dwarf_frame_alloc_reg - allocate memory for a DWARF register + * @frame: the DWARF frame whose list of registers we insert on + * @reg_num: the register number + * + * Allocate space for, and initialise, a dwarf reg from + * dwarf_reg_pool and insert it onto the (unsorted) linked-list of + * dwarf registers for @frame. * - * Register numbers start at zero, therefore we need to allocate space - * for "reg" + 1 registers. + * Return the initialised DWARF reg. */ -static void dwarf_frame_alloc_regs(struct dwarf_frame *frame, - unsigned int reg) +static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame, + unsigned int reg_num) { - struct dwarf_reg *regs; - unsigned int num_regs = reg + 1; - size_t new_size; - size_t old_size; + struct dwarf_reg *reg; - new_size = num_regs * sizeof(*regs); - old_size = frame->num_regs * sizeof(*regs); - - /* Fast path: don't allocate any regs if we've already got enough. */ - if (frame->num_regs >= num_regs) - return; - - regs = kzalloc(new_size, GFP_ATOMIC); - if (!regs) { - printk(KERN_WARNING "Unable to allocate DWARF registers\n"); + reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC); + if (!reg) { + printk(KERN_WARNING "Unable to allocate a DWARF register\n"); /* * Let's just bomb hard here, we have no way to * gracefully recover. @@ -67,13 +71,44 @@ static void dwarf_frame_alloc_regs(struct dwarf_frame *frame, BUG(); } - if (frame->regs) { - memcpy(regs, frame->regs, old_size); - kfree(frame->regs); + reg->number = reg_num; + reg->addr = 0; + reg->flags = 0; + + list_add(®->link, &frame->reg_list); + + return reg; +} + +static void dwarf_frame_free_regs(struct dwarf_frame *frame) +{ + struct dwarf_reg *reg, *n; + + list_for_each_entry_safe(reg, n, &frame->reg_list, link) { + list_del(®->link); + mempool_free(reg, dwarf_reg_pool); + } +} + +/** + * dwarf_frame_reg - return a DWARF register + * @frame: the DWARF frame to search in for @reg_num + * @reg_num: the register number to search for + * + * Lookup and return the dwarf reg @reg_num for this frame. Return + * NULL if @reg_num is an register invalid number. 
+ */ +static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame, + unsigned int reg_num) +{ + struct dwarf_reg *reg; + + list_for_each_entry(reg, &frame->reg_list, link) { + if (reg->number == reg_num) + return reg; } - frame->regs = regs; - frame->num_regs = num_regs; + return NULL; } /** @@ -347,6 +382,7 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, unsigned char insn; unsigned char *current_insn; unsigned int count, delta, reg, expr_len, offset; + struct dwarf_reg *regp; current_insn = insn_start; @@ -369,9 +405,9 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, count = dwarf_read_uleb128(current_insn, &offset); current_insn += count; offset *= cie->data_alignment_factor; - dwarf_frame_alloc_regs(frame, reg); - frame->regs[reg].addr = offset; - frame->regs[reg].flags |= DWARF_REG_OFFSET; + regp = dwarf_frame_alloc_reg(frame, reg); + regp->addr = offset; + regp->flags |= DWARF_REG_OFFSET; continue; /* NOTREACHED */ case DW_CFA_restore: @@ -453,17 +489,18 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, count = dwarf_read_leb128(current_insn, &offset); current_insn += count; offset *= cie->data_alignment_factor; - dwarf_frame_alloc_regs(frame, reg); - frame->regs[reg].flags |= DWARF_REG_OFFSET; - frame->regs[reg].addr = offset; + regp = dwarf_frame_alloc_reg(frame, reg); + regp->flags |= DWARF_REG_OFFSET; + regp->addr = offset; break; case DW_CFA_val_offset: count = dwarf_read_uleb128(current_insn, ®); current_insn += count; count = dwarf_read_leb128(current_insn, &offset); offset *= cie->data_alignment_factor; - frame->regs[reg].flags |= DWARF_REG_OFFSET; - frame->regs[reg].addr = offset; + regp = dwarf_frame_alloc_reg(frame, reg); + regp->flags |= DWARF_REG_OFFSET; + regp->addr = offset; break; case DW_CFA_GNU_args_size: count = dwarf_read_uleb128(current_insn, &offset); @@ -474,9 +511,10 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, current_insn += count; count = dwarf_read_uleb128(current_insn, &offset); offset *= cie->data_alignment_factor; - dwarf_frame_alloc_regs(frame, reg); - frame->regs[reg].flags |= DWARF_REG_OFFSET; - frame->regs[reg].addr = -offset; + + regp = dwarf_frame_alloc_reg(frame, reg); + regp->flags |= DWARF_REG_OFFSET; + regp->addr = -offset; break; default: pr_debug("unhandled DWARF instruction 0x%x\n", insn); @@ -502,8 +540,8 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, struct dwarf_frame *frame; struct dwarf_cie *cie; struct dwarf_fde *fde; + struct dwarf_reg *reg; unsigned long addr; - int i, offset; /* * If this is the first invocation of this recursive function we @@ -516,11 +554,16 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, if (!pc && !prev) pc = (unsigned long)current_text_addr(); - frame = kzalloc(sizeof(*frame), GFP_ATOMIC); - if (!frame) - return NULL; + frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC); + if (!frame) { + printk(KERN_ERR "Unable to allocate a dwarf frame\n"); + BUG(); + } + INIT_LIST_HEAD(&frame->reg_list); + frame->flags = 0; frame->prev = prev; + frame->return_addr = 0; fde = dwarf_lookup_fde(pc); if (!fde) { @@ -540,7 +583,7 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, * case above, which sucks because we could print a * warning here. 
*/ - return NULL; + goto bail; } cie = dwarf_lookup_cie(fde->cie_pointer); @@ -560,10 +603,10 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, switch (frame->flags) { case DWARF_FRAME_CFA_REG_OFFSET: if (prev) { - BUG_ON(!prev->regs[frame->cfa_register].flags); + reg = dwarf_frame_reg(prev, frame->cfa_register); + BUG_ON(!reg); - addr = prev->cfa; - addr += prev->regs[frame->cfa_register].addr; + addr = prev->cfa + reg->addr; frame->cfa = __raw_readl(addr); } else { @@ -584,23 +627,18 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, } /* If we haven't seen the return address reg, we're screwed. */ - BUG_ON(!frame->regs[DWARF_ARCH_RA_REG].flags); - - for (i = 0; i <= frame->num_regs; i++) { - struct dwarf_reg *reg = &frame->regs[i]; - - if (!reg->flags) - continue; + reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG); + BUG_ON(!reg); - offset = reg->addr; - offset += frame->cfa; - } - - addr = frame->cfa + frame->regs[DWARF_ARCH_RA_REG].addr; + addr = frame->cfa + reg->addr; frame->return_addr = __raw_readl(addr); - frame->next = dwarf_unwind_stack(frame->return_addr, frame); return frame; + +bail: + dwarf_frame_free_regs(frame); + mempool_free(frame, dwarf_frame_pool); + return NULL; } static int dwarf_parse_cie(void *entry, void *p, unsigned long len, @@ -770,14 +808,29 @@ static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs, unsigned long *sp, const struct stacktrace_ops *ops, void *data) { - struct dwarf_frame *frame; + struct dwarf_frame *frame, *_frame; + unsigned long return_addr; + + _frame = NULL; + return_addr = 0; - frame = dwarf_unwind_stack(0, NULL); + while (1) { + frame = dwarf_unwind_stack(return_addr, _frame); + + if (_frame) { + dwarf_frame_free_regs(_frame); + mempool_free(_frame, dwarf_frame_pool); + } + + _frame = frame; + + if (!frame || !frame->return_addr) + break; - while (frame && frame->return_addr) { - ops->address(data, frame->return_addr, 1); - frame = frame->next; + return_addr = frame->return_addr; + ops->address(data, return_addr, 1); } + } static struct unwinder dwarf_unwinder = { @@ -801,6 +854,9 @@ static void dwarf_unwinder_cleanup(void) list_for_each_entry(fde, &dwarf_fde_list, link) kfree(fde); + + kmem_cache_destroy(dwarf_reg_cachep); + kmem_cache_destroy(dwarf_frame_cachep); } /** @@ -827,6 +883,21 @@ static int __init dwarf_unwinder_init(void) f_entries = 0; entry = &__start_eh_frame; + dwarf_frame_cachep = kmem_cache_create("dwarf_frames", + sizeof(struct dwarf_frame), 0, SLAB_PANIC, NULL); + dwarf_reg_cachep = kmem_cache_create("dwarf_regs", + sizeof(struct dwarf_reg), 0, SLAB_PANIC, NULL); + + dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ, + mempool_alloc_slab, + mempool_free_slab, + dwarf_frame_cachep); + + dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ, + mempool_alloc_slab, + mempool_free_slab, + dwarf_reg_cachep); + while ((char *)entry < __stop_eh_frame) { p = entry; -- cgit v1.2.3-59-g8ed1b From 97efbbd5886e27b61c19c77d41f6491f5d96fbd0 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sun, 16 Aug 2009 15:56:35 +0100 Subject: sh: unwinder: Set the flags for DW_CFA_val_offset ops as DWARF_VAL_OFFSET The handling of DW_CFA_val_offset ops was incorrectly using the DWARF_REG_OFFSET flag but the register's value cannot be calculated using the DWARF_REG_OFFSET method. Create a new flag to indicate that a different method must be used to calculate the register's value even though there is no implementation for DWARF_VAL_OFFSET yet; it's mainly just a place holder. 
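To make the two rules concrete: a DWARF_REG_OFFSET register is restored by loading from memory at CFA + offset, whereas a DWARF_VAL_OFFSET register's value simply is CFA + offset, with no load. Below is a minimal sketch of how the distinction could eventually be evaluated, written as if it lived in arch/sh/kernel/dwarf.c; dwarf_frame_reg_value() is a hypothetical helper and is not part of this patch:

static unsigned long dwarf_frame_reg_value(struct dwarf_frame *frame,
					   struct dwarf_reg *reg)
{
	if (reg->flags & DWARF_REG_OFFSET)
		/* DW_CFA_offset rule: the value was saved at CFA + offset */
		return __raw_readl(frame->cfa + reg->addr);

	if (reg->flags & DWARF_VAL_OFFSET)
		/* DW_CFA_val_offset rule: the value itself is CFA + offset */
		return frame->cfa + reg->addr;

	return 0;	/* undefined or unhandled rule */
}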
Signed-off-by: Matt Fleming --- arch/sh/include/asm/dwarf.h | 1 + arch/sh/kernel/dwarf.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h index a22fbe98303f..8b0bcc087385 100644 --- a/arch/sh/include/asm/dwarf.h +++ b/arch/sh/include/asm/dwarf.h @@ -296,6 +296,7 @@ struct dwarf_reg { unsigned long addr; unsigned long flags; #define DWARF_REG_OFFSET (1 << 0) +#define DWARF_VAL_OFFSET (1 << 1) }; /* diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index e4810375207d..d271d04adccd 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -11,6 +11,7 @@ * * TODO: * - DWARF64 doesn't work. + * - Registers with DWARF_VAL_OFFSET rules aren't handled properly. */ /* #define DEBUG */ @@ -499,7 +500,7 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, count = dwarf_read_leb128(current_insn, &offset); offset *= cie->data_alignment_factor; regp = dwarf_frame_alloc_reg(frame, reg); - regp->flags |= DWARF_REG_OFFSET; + regp->flags |= DWARF_VAL_OFFSET; regp->addr = offset; break; case DW_CFA_GNU_args_size: -- cgit v1.2.3-59-g8ed1b From b344e24a8e8ceda83d1285d22e3e5baf4f5e42d3 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sun, 16 Aug 2009 21:54:48 +0100 Subject: sh: unwinder: Introduce UNWINDER_BUG() and UNWINDER_BUG_ON() We can't assume that if we execute the unwinder code and the unwinder was already running that it has faulted. Clearly two kernel threads can invoke the unwinder at the same time and may be running simultaneously. The previous approach used BUG() and BUG_ON() in the unwinder code to detect whether the unwinder was incapable of unwinding the stack, and that the next available unwinder should be used instead. A better approach is to explicitly invoke a trap handler to switch unwinders when the current unwinder cannot continue. 
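As a usage sketch (illustrative only; the real conversions are in the hunks below), a consistency check inside the DWARF unwinder goes from a fatal BUG_ON() to a recoverable UNWINDER_BUG_ON():

	/* before: an unwinder inconsistency takes down the whole kernel */
	BUG_ON(!reg);

	/* after: raise a trap instead; the handler sets unwinder_faulted,
	 * and the next unwind_stack() call drops the DWARF unwinder and
	 * falls back to the next-best registered unwinder */
	UNWINDER_BUG_ON(!reg);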
Signed-off-by: Matt Fleming --- arch/sh/include/asm/bug.h | 25 +++++++++++++++++++++++++ arch/sh/include/asm/system.h | 5 +++++ arch/sh/include/asm/unwinder.h | 6 ++++++ arch/sh/kernel/debugtraps.S | 6 +++++- arch/sh/kernel/dwarf.c | 38 +++++++++++++++++++++----------------- arch/sh/kernel/traps.c | 7 +++++-- arch/sh/kernel/traps_32.c | 1 + arch/sh/kernel/unwinder.c | 33 +++++++++++++++++++++++++++------ 8 files changed, 95 insertions(+), 26 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/bug.h b/arch/sh/include/asm/bug.h index c01718040166..b7d9822fd6c2 100644 --- a/arch/sh/include/asm/bug.h +++ b/arch/sh/include/asm/bug.h @@ -1,6 +1,7 @@ #ifndef __ASM_SH_BUG_H #define __ASM_SH_BUG_H +#define TRAPA_UNWINDER_BUG_OPCODE 0xc33b /* trapa #0x3b */ #define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */ #ifdef CONFIG_GENERIC_BUG @@ -72,6 +73,30 @@ do { \ unlikely(__ret_warn_on); \ }) +#define UNWINDER_BUG() \ +do { \ + __asm__ __volatile__ ( \ + "1:\t.short %O0\n" \ + _EMIT_BUG_ENTRY \ + : \ + : "n" (TRAPA_UNWINDER_BUG_OPCODE), \ + "i" (__FILE__), \ + "i" (__LINE__), "i" (0), \ + "i" (sizeof(struct bug_entry))); \ +} while (0) + +#define UNWINDER_BUG_ON(x) ({ \ + int __ret_unwinder_on = !!(x); \ + if (__builtin_constant_p(__ret_unwinder_on)) { \ + if (__ret_unwinder_on) \ + UNWINDER_BUG(); \ + } else { \ + if (unlikely(__ret_unwinder_on)) \ + UNWINDER_BUG(); \ + } \ + unlikely(__ret_unwinder_on); \ +}) + #endif /* CONFIG_GENERIC_BUG */ #include diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h index ab79e1f4fbe0..f9e2ceb94d9b 100644 --- a/arch/sh/include/asm/system.h +++ b/arch/sh/include/asm/system.h @@ -181,6 +181,11 @@ BUILD_TRAP_HANDLER(breakpoint); BUILD_TRAP_HANDLER(singlestep); BUILD_TRAP_HANDLER(fpu_error); BUILD_TRAP_HANDLER(fpu_state_restore); +BUILD_TRAP_HANDLER(unwinder); + +#ifdef CONFIG_BUG +extern void handle_BUG(struct pt_regs *); +#endif #define arch_align_stack(x) (x) diff --git a/arch/sh/include/asm/unwinder.h b/arch/sh/include/asm/unwinder.h index 3dc551453e28..1e65c07b3e18 100644 --- a/arch/sh/include/asm/unwinder.h +++ b/arch/sh/include/asm/unwinder.h @@ -22,4 +22,10 @@ extern void stack_reader_dump(struct task_struct *, struct pt_regs *, unsigned long *, const struct stacktrace_ops *, void *); +/* + * Used by fault handling code to signal to the unwinder code that it + * should switch to a different unwinder. + */ +extern int unwinder_faulted; + #endif /* _LINUX_UNWINDER_H */ diff --git a/arch/sh/kernel/debugtraps.S b/arch/sh/kernel/debugtraps.S index 591741383ee6..cb00e4a82d4d 100644 --- a/arch/sh/kernel/debugtraps.S +++ b/arch/sh/kernel/debugtraps.S @@ -19,6 +19,10 @@ #if !defined(CONFIG_SH_STANDARD_BIOS) #define sh_bios_handler debug_trap_handler +#endif + +#if !defined(CONFIG_DWARF_UNWINDER) +#define unwinder_trap_handler debug_trap_handler #endif .data @@ -35,7 +39,7 @@ ENTRY(debug_trap_table) .long debug_trap_handler /* 0x38 */ .long debug_trap_handler /* 0x39 */ .long debug_trap_handler /* 0x3a */ - .long debug_trap_handler /* 0x3b */ + .long unwinder_trap_handler /* 0x3b */ .long breakpoint_trap_handler /* 0x3c */ .long singlestep_trap_handler /* 0x3d */ .long bug_trap_handler /* 0x3e */ diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index d271d04adccd..606ece37eb42 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -69,7 +69,7 @@ static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame, * Let's just bomb hard here, we have no way to * gracefully recover. 
*/ - BUG(); + UNWINDER_BUG(); } reg->number = reg_num; @@ -232,7 +232,7 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val, break; default: pr_debug("encoding=0x%x\n", (encoding & 0x70)); - BUG(); + UNWINDER_BUG(); } if ((encoding & 0x07) == 0x00) @@ -247,7 +247,7 @@ static int dwarf_read_encoded_value(char *addr, unsigned long *val, break; default: pr_debug("encoding=0x%x\n", encoding); - BUG(); + UNWINDER_BUG(); } return count; @@ -519,6 +519,7 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, break; default: pr_debug("unhandled DWARF instruction 0x%x\n", insn); + UNWINDER_BUG(); break; } } @@ -535,8 +536,8 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, * on the callstack. Each of the lower (older) stack frames are * linked via the "prev" member. */ -struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, - struct dwarf_frame *prev) +struct dwarf_frame * dwarf_unwind_stack(unsigned long pc, + struct dwarf_frame *prev) { struct dwarf_frame *frame; struct dwarf_cie *cie; @@ -558,7 +559,7 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC); if (!frame) { printk(KERN_ERR "Unable to allocate a dwarf frame\n"); - BUG(); + UNWINDER_BUG(); } INIT_LIST_HEAD(&frame->reg_list); @@ -605,7 +606,8 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, case DWARF_FRAME_CFA_REG_OFFSET: if (prev) { reg = dwarf_frame_reg(prev, frame->cfa_register); - BUG_ON(!reg); + UNWINDER_BUG_ON(!reg); + UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET); addr = prev->cfa + reg->addr; frame->cfa = __raw_readl(addr); @@ -624,12 +626,13 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, frame->cfa += frame->cfa_offset; break; default: - BUG(); + UNWINDER_BUG(); } /* If we haven't seen the return address reg, we're screwed. */ reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG); - BUG_ON(!reg); + UNWINDER_BUG_ON(!reg); + UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET); addr = frame->cfa + reg->addr; frame->return_addr = __raw_readl(addr); @@ -664,7 +667,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len, cie->cie_pointer = (unsigned long)entry; cie->version = *(char *)p++; - BUG_ON(cie->version != 1); + UNWINDER_BUG_ON(cie->version != 1); cie->augmentation = p; p += strlen(cie->augmentation) + 1; @@ -694,7 +697,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len, count = dwarf_read_uleb128(p, &length); p += count; - BUG_ON((unsigned char *)p > end); + UNWINDER_BUG_ON((unsigned char *)p > end); cie->initial_instructions = p + length; cie->augmentation++; @@ -722,16 +725,16 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len, * routine in the CIE * augmentation. */ - BUG(); + UNWINDER_BUG(); } else if (*cie->augmentation == 'S') { - BUG(); + UNWINDER_BUG(); } else { /* * Unknown augmentation. Assume * 'z' augmentation. 
*/ p = cie->initial_instructions; - BUG_ON(!p); + UNWINDER_BUG_ON(!p); break; } } @@ -805,9 +808,11 @@ static int dwarf_parse_fde(void *entry, u32 entry_type, return 0; } -static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs, +static void dwarf_unwinder_dump(struct task_struct *task, + struct pt_regs *regs, unsigned long *sp, - const struct stacktrace_ops *ops, void *data) + const struct stacktrace_ops *ops, + void *data) { struct dwarf_frame *frame, *_frame; unsigned long return_addr; @@ -831,7 +836,6 @@ static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs, return_addr = frame->return_addr; ops->address(data, return_addr, 1); } - } static struct unwinder dwarf_unwinder = { diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index b3e0067db358..881b9a32b7de 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c @@ -8,7 +8,7 @@ #include #ifdef CONFIG_BUG -static void handle_BUG(struct pt_regs *regs) +void handle_BUG(struct pt_regs *regs) { enum bug_trap_type tt; tt = report_bug(regs->pc, regs); @@ -29,7 +29,10 @@ int is_valid_bugaddr(unsigned long addr) if (probe_kernel_address((insn_size_t *)addr, opcode)) return 0; - return opcode == TRAPA_BUG_OPCODE; + if (opcode == TRAPA_BUG_OPCODE || opcode == TRAPA_UNWINDER_BUG_OPCODE) + return 1; + + return 0; } #endif diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 563426487c6b..05a04b6df844 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -136,6 +136,7 @@ static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err) regs->pc = fixup->fixup; return; } + die(str, regs, err); } } diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c index 2b30fa28b440..b9c122abe251 100644 --- a/arch/sh/kernel/unwinder.c +++ b/arch/sh/kernel/unwinder.c @@ -53,8 +53,6 @@ static struct list_head unwinder_list = { static DEFINE_SPINLOCK(unwinder_lock); -static atomic_t unwinder_running = ATOMIC_INIT(0); - /** * select_unwinder - Select the best registered stack unwinder. * @@ -122,6 +120,8 @@ int unwinder_register(struct unwinder *u) return ret; } +int unwinder_faulted = 0; + /* * Unwind the call stack and pass information to the stacktrace_ops * functions. Also handle the case where we need to switch to a new @@ -144,19 +144,40 @@ void unwind_stack(struct task_struct *task, struct pt_regs *regs, * Hopefully this will give us a semi-reliable stacktrace so we * can diagnose why curr_unwinder->dump() faulted. */ - if (atomic_inc_return(&unwinder_running) != 1) { + if (unwinder_faulted) { spin_lock_irqsave(&unwinder_lock, flags); - if (!list_is_singular(&unwinder_list)) { + /* Make sure no one beat us to changing the unwinder */ + if (unwinder_faulted && !list_is_singular(&unwinder_list)) { list_del(&curr_unwinder->list); curr_unwinder = select_unwinder(); + + unwinder_faulted = 0; } spin_unlock_irqrestore(&unwinder_lock, flags); - atomic_dec(&unwinder_running); } curr_unwinder->dump(task, regs, sp, ops, data); +} + +/* + * Trap handler for UWINDER_BUG() statements. We must switch to the + * unwinder with the next highest rating. 
+ */ +BUILD_TRAP_HANDLER(unwinder) +{ + insn_size_t insn; + TRAP_HANDLER_DECL; + + /* Rewind */ + regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); + insn = *(insn_size_t *)instruction_pointer(regs); + + /* Switch unwinders when unwind_stack() is called */ + unwinder_faulted = 1; - atomic_dec(&unwinder_running); +#ifdef CONFIG_BUG + handle_BUG(regs); +#endif } -- cgit v1.2.3-59-g8ed1b From fe98dd31eb9fe055703e0171134b4d074558d4e9 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Thu, 20 Aug 2009 17:00:21 +0100 Subject: sh: Setup the frame pointer in handle_interrupt When CONFIG_DWARF_UNWINDER is enabled setup r14 in handle_interrupt, so that we can figure out what function was running when we were interrupted. Signed-off-by: Matt Fleming --- arch/sh/kernel/cpu/sh3/entry.S | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index 67ad6467c694..f89ec87e3fc3 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -508,6 +508,8 @@ ENTRY(handle_interrupt) bsr save_regs ! needs original pr value in k3 mov #-1, k2 ! default vector kept in k2 + setup_frame_reg + ! Setup return address and jump to do_IRQ mov.l 4f, r9 ! fetch return address lds r9, pr ! put return address in pr -- cgit v1.2.3-59-g8ed1b From 5480675dc60c7dda7146e506981b2b40a775cc1e Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Thu, 20 Aug 2009 19:42:34 +0100 Subject: sh: Fix bug calculating the end of the FDE instructions The 'end' member of struct dwarf_fde denotes one byte past the end of the CFA instruction stream for an FDE. The value of 'end' was being calcualted incorrectly, it was being set too high. This resulted in dwarf_cfa_execute_insns() interpreting data past the end of valid instructions, thus causing all sorts of weird crashes. Signed-off-by: Matt Fleming --- arch/sh/kernel/dwarf.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 606ece37eb42..e6f427cff5ba 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -751,7 +751,8 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len, } static int dwarf_parse_fde(void *entry, u32 entry_type, - void *start, unsigned long len) + void *start, unsigned long len, + unsigned char *end) { struct dwarf_fde *fde; struct dwarf_cie *cie; @@ -798,7 +799,7 @@ static int dwarf_parse_fde(void *entry, u32 entry_type, /* Call frame instructions. */ fde->instructions = p; - fde->end = start + len; + fde->end = end; /* Add to list. */ spin_lock_irqsave(&dwarf_fde_lock, flags); @@ -932,7 +933,7 @@ static int __init dwarf_unwinder_init(void) else c_entries++; } else { - err = dwarf_parse_fde(entry, entry_type, p, len); + err = dwarf_parse_fde(entry, entry_type, p, len, end); if (err < 0) goto out; else -- cgit v1.2.3-59-g8ed1b From 5580e9044df9c0e87861739d8c527006ead92e52 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Thu, 20 Aug 2009 19:53:49 +0100 Subject: sh: Handle the DWARF op, DW_CFA_undefined Allow a DWARF register to have an undefined value. When applied to the DWARF return address register this lets lets us label a function as having no direct caller, e.g. kernel_thread_helper(). 
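In unwinder terms the new rule means "stop walking here": when the return-address column of a frame is marked DWARF_UNDEFINED, the walk ends cleanly instead of tripping the UNWINDER_BUG_ON(). A simplified sketch of that check (the full version is in the dwarf_unwind_stack() hunk below):

	reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);
	if (!reg || reg->flags == DWARF_UNDEFINED)
		return NULL;	/* top of the call stack: no direct caller */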
Signed-off-by: Matt Fleming --- arch/sh/include/asm/dwarf.h | 3 +++ arch/sh/kernel/dwarf.c | 13 +++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h index 8b0bcc087385..c367ed3373c5 100644 --- a/arch/sh/include/asm/dwarf.h +++ b/arch/sh/include/asm/dwarf.h @@ -297,6 +297,7 @@ struct dwarf_reg { unsigned long flags; #define DWARF_REG_OFFSET (1 << 0) #define DWARF_VAL_OFFSET (1 << 1) +#define DWARF_UNDEFINED (1 << 2) }; /* @@ -370,6 +371,7 @@ extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, #define CFI_DEF_CFA .cfi_def_cfa #define CFI_REGISTER .cfi_register #define CFI_REL_OFFSET .cfi_rel_offset +#define CFI_UNDEFINED .cfi_undefined #else @@ -383,6 +385,7 @@ extern struct dwarf_frame *dwarf_unwind_stack(unsigned long, #define CFI_DEF_CFA CFI_IGNORE #define CFI_REGISTER CFI_IGNORE #define CFI_REL_OFFSET CFI_IGNORE +#define CFI_UNDEFINED CFI_IGNORE #ifndef __ASSEMBLY__ static inline void dwarf_unwinder_init(void) diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index e6f427cff5ba..577302f31e6a 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -452,6 +452,8 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, case DW_CFA_undefined: count = dwarf_read_uleb128(current_insn, ®); current_insn += count; + regp = dwarf_frame_alloc_reg(frame, reg); + regp->flags |= DWARF_UNDEFINED; break; case DW_CFA_def_cfa: count = dwarf_read_uleb128(current_insn, @@ -629,9 +631,16 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc, UNWINDER_BUG(); } - /* If we haven't seen the return address reg, we're screwed. */ reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG); - UNWINDER_BUG_ON(!reg); + + /* + * If we haven't seen the return address register or the return + * address column is undefined then we must assume that this is + * the end of the callstack. + */ + if (!reg || reg->flags == DWARF_UNDEFINED) + goto bail; + UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET); addr = frame->cfa + reg->addr; -- cgit v1.2.3-59-g8ed1b From 4ab8f241f6d510470c15b62ac10f6905ff5c97bd Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 22 Aug 2009 03:43:15 +0900 Subject: sh: Export unwind_stack() to satisfy modular oprofile. If the oprofile code is built as a module, unwind_stack() as used by the oprofile backtrace code is not available, causing build breakage. Signed-off-by: Paul Mundt --- arch/sh/kernel/unwinder.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c index 2b30fa28b440..5f56ff3f55e0 100644 --- a/arch/sh/kernel/unwinder.c +++ b/arch/sh/kernel/unwinder.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -160,3 +161,4 @@ void unwind_stack(struct task_struct *task, struct pt_regs *regs, atomic_dec(&unwinder_running); } +EXPORT_SYMBOL_GPL(unwind_stack); -- cgit v1.2.3-59-g8ed1b From e115f2c17cbceee93b34d787a7a4a867fc73e7b4 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 22 Aug 2009 05:28:25 +0900 Subject: sh: unwinder: Use a special bug flag for unwinder traps. This simplifies the unwinder trap handling, dropping the use of the special trapa vector and simply piggybacking on top of the BUG support. A new BUGFLAG_UNWINDER is added for flagging the unwinder fault, before continuing on with regular BUG dispatch. 
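Conceptually the dispatch becomes a flag check on the bug table entry; a condensed sketch of what handle_BUG() does after this change (the complete version is in the traps.c hunk below):

	bug = find_bug(regs->pc);
	if (bug->flags & BUGFLAG_UNWINDER)
		unwinder_faulted = 1;	/* next unwind_stack() switches unwinders */

	/* ...then fall through to the usual report_bug()/die() handling */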
Signed-off-by: Paul Mundt --- arch/sh/include/asm/bug.h | 11 ++++++----- arch/sh/kernel/debugtraps.S | 6 +----- arch/sh/kernel/traps.c | 21 +++++++++++++++++---- arch/sh/kernel/unwinder.c | 21 --------------------- 4 files changed, 24 insertions(+), 35 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/bug.h b/arch/sh/include/asm/bug.h index b7d9822fd6c2..23c5504a3a01 100644 --- a/arch/sh/include/asm/bug.h +++ b/arch/sh/include/asm/bug.h @@ -1,8 +1,8 @@ #ifndef __ASM_SH_BUG_H #define __ASM_SH_BUG_H -#define TRAPA_UNWINDER_BUG_OPCODE 0xc33b /* trapa #0x3b */ #define TRAPA_BUG_OPCODE 0xc33e /* trapa #0x3e */ +#define BUGFLAG_UNWINDER (1 << 1) #ifdef CONFIG_GENERIC_BUG #define HAVE_ARCH_BUG @@ -73,15 +73,16 @@ do { \ unlikely(__ret_warn_on); \ }) -#define UNWINDER_BUG() \ +#define UNWINDER_BUG() \ do { \ __asm__ __volatile__ ( \ "1:\t.short %O0\n" \ - _EMIT_BUG_ENTRY \ + _EMIT_BUG_ENTRY \ : \ - : "n" (TRAPA_UNWINDER_BUG_OPCODE), \ + : "n" (TRAPA_BUG_OPCODE), \ "i" (__FILE__), \ - "i" (__LINE__), "i" (0), \ + "i" (__LINE__), \ + "i" (BUGFLAG_UNWINDER), \ "i" (sizeof(struct bug_entry))); \ } while (0) diff --git a/arch/sh/kernel/debugtraps.S b/arch/sh/kernel/debugtraps.S index cb00e4a82d4d..591741383ee6 100644 --- a/arch/sh/kernel/debugtraps.S +++ b/arch/sh/kernel/debugtraps.S @@ -19,10 +19,6 @@ #if !defined(CONFIG_SH_STANDARD_BIOS) #define sh_bios_handler debug_trap_handler -#endif - -#if !defined(CONFIG_DWARF_UNWINDER) -#define unwinder_trap_handler debug_trap_handler #endif .data @@ -39,7 +35,7 @@ ENTRY(debug_trap_table) .long debug_trap_handler /* 0x38 */ .long debug_trap_handler /* 0x39 */ .long debug_trap_handler /* 0x3a */ - .long unwinder_trap_handler /* 0x3b */ + .long debug_trap_handler /* 0x3b */ .long breakpoint_trap_handler /* 0x3c */ .long singlestep_trap_handler /* 0x3d */ .long bug_trap_handler /* 0x3e */ diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index 881b9a32b7de..f69bd968fcca 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c @@ -5,18 +5,32 @@ #include #include #include +#include #include #ifdef CONFIG_BUG void handle_BUG(struct pt_regs *regs) { + const struct bug_entry *bug; + unsigned long bugaddr = regs->pc; enum bug_trap_type tt; - tt = report_bug(regs->pc, regs); + + if (!is_valid_bugaddr(bugaddr)) + goto invalid; + + bug = find_bug(bugaddr); + + /* Switch unwinders when unwind_stack() is called */ + if (bug->flags & BUGFLAG_UNWINDER) + unwinder_faulted = 1; + + tt = report_bug(bugaddr, regs); if (tt == BUG_TRAP_TYPE_WARN) { - regs->pc += instruction_size(regs->pc); + regs->pc += instruction_size(bugaddr); return; } +invalid: die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff); } @@ -28,8 +42,7 @@ int is_valid_bugaddr(unsigned long addr) return 0; if (probe_kernel_address((insn_size_t *)addr, opcode)) return 0; - - if (opcode == TRAPA_BUG_OPCODE || opcode == TRAPA_UNWINDER_BUG_OPCODE) + if (opcode == TRAPA_BUG_OPCODE) return 1; return 0; diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c index e83861d9739c..468889d958f4 100644 --- a/arch/sh/kernel/unwinder.c +++ b/arch/sh/kernel/unwinder.c @@ -161,25 +161,4 @@ void unwind_stack(struct task_struct *task, struct pt_regs *regs, curr_unwinder->dump(task, regs, sp, ops, data); } - -/* - * Trap handler for UWINDER_BUG() statements. We must switch to the - * unwinder with the next highest rating. 
- */ -BUILD_TRAP_HANDLER(unwinder) -{ - insn_size_t insn; - TRAP_HANDLER_DECL; - - /* Rewind */ - regs->pc -= instruction_size(ctrl_inw(regs->pc - 4)); - insn = *(insn_size_t *)instruction_pointer(regs); - - /* Switch unwinders when unwind_stack() is called */ - unwinder_faulted = 1; - -#ifdef CONFIG_BUG - handle_BUG(regs); -#endif -} EXPORT_SYMBOL_GPL(unwind_stack); -- cgit v1.2.3-59-g8ed1b From 4f896ffca2b72f4b719746e7fbb0b623252e6ac9 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 22 Aug 2009 19:03:25 +0900 Subject: sh: unwinder: cacheline align slab cache objects. The CIE and FDE structs are big enough and accessed regularly enough in certain configurations to make cacheline alignment useful. Signed-off-by: Paul Mundt --- arch/sh/kernel/dwarf.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 577302f31e6a..700f7e0fd658 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -899,9 +899,12 @@ static int __init dwarf_unwinder_init(void) entry = &__start_eh_frame; dwarf_frame_cachep = kmem_cache_create("dwarf_frames", - sizeof(struct dwarf_frame), 0, SLAB_PANIC, NULL); + sizeof(struct dwarf_frame), 0, + SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); + dwarf_reg_cachep = kmem_cache_create("dwarf_regs", - sizeof(struct dwarf_reg), 0, SLAB_PANIC, NULL); + sizeof(struct dwarf_reg), 0, + SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL); dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ, mempool_alloc_slab, -- cgit v1.2.3-59-g8ed1b From 6a93dde1e8216f7af9b2551a60fb1a5eeac4a89f Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Fri, 14 Aug 2009 10:48:16 +0000 Subject: sh: Runtime PM for SuperH Mobile platform bus devices This patch is V3 of the SuperH Mobile Runtime PM platform bus implentation matching Rafael's Runtime PM v16. The code gets invoked from the SuperH specific Runtime PM platform bus functions that override the weak symbols for: - platform_pm_runtime_suspend() - platform_pm_runtime_resume() - platform_pm_runtime_idle() This Runtime PM implementation performs two levels of power management. At the time of platform bus runtime suspend the clock to the device is stopped instantly. Later on if all devices within the power domain has their clocks stopped then the device driver ->runtime_suspend() callbacks are used to save hardware register state for each device. Device driver ->runtime_suspend() calls are scheduled from cpuidle context using platform_pm_runtime_suspend_idle(). When all devices have been fully suspended the processor is allowed to enter deep sleep from cpuidle. The runtime resume operation turns on clocks and also restores registers if needed. It is worth noting that the devices start in a suspended state and the device driver is responsible for calling runtime resume before accessing the actual hardware. In this particular platform bus implementation runtime resume is not allowed from interrupt context. Runtime suspend is however allowed from interrupt context as long as the synchronous functions are avoided. [ updated for v17 -- PFM. 
] Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/include/asm/device.h | 9 + arch/sh/include/asm/hwblk.h | 4 +- arch/sh/kernel/cpu/shmobile/Makefile | 1 + arch/sh/kernel/cpu/shmobile/pm_runtime.c | 304 +++++++++++++++++++++++++++++++ 4 files changed, 317 insertions(+), 1 deletion(-) create mode 100644 arch/sh/kernel/cpu/shmobile/pm_runtime.c (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/device.h b/arch/sh/include/asm/device.h index 783ecdc64e25..b16debfe8c1e 100644 --- a/arch/sh/include/asm/device.h +++ b/arch/sh/include/asm/device.h @@ -14,6 +14,15 @@ int platform_resource_setup_memory(struct platform_device *pdev, void plat_early_device_setup(void); +#define PDEV_ARCHDATA_FLAG_INIT 0 +#define PDEV_ARCHDATA_FLAG_IDLE 1 +#define PDEV_ARCHDATA_FLAG_SUSP 2 + struct pdev_archdata { int hwblk_id; +#ifdef CONFIG_PM_RUNTIME + unsigned long flags; + struct list_head entry; + struct mutex mutex; +#endif }; diff --git a/arch/sh/include/asm/hwblk.h b/arch/sh/include/asm/hwblk.h index c01d72cb6757..5d3ccae4202b 100644 --- a/arch/sh/include/asm/hwblk.h +++ b/arch/sh/include/asm/hwblk.h @@ -5,7 +5,9 @@ #include #define HWBLK_CNT_USAGE 0 -#define HWBLK_CNT_NR 1 +#define HWBLK_CNT_IDLE 1 +#define HWBLK_CNT_DEVICES 2 +#define HWBLK_CNT_NR 3 #define HWBLK_AREA_FLAG_PARENT (1 << 0) /* valid parent */ diff --git a/arch/sh/kernel/cpu/shmobile/Makefile b/arch/sh/kernel/cpu/shmobile/Makefile index e8a5111e848a..a39f88ea1a85 100644 --- a/arch/sh/kernel/cpu/shmobile/Makefile +++ b/arch/sh/kernel/cpu/shmobile/Makefile @@ -5,3 +5,4 @@ # Power Management & Sleep mode obj-$(CONFIG_PM) += pm.o sleep.o obj-$(CONFIG_CPU_IDLE) += cpuidle.o +obj-$(CONFIG_PM_RUNTIME) += pm_runtime.o diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c new file mode 100644 index 000000000000..d5f5285a2253 --- /dev/null +++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c @@ -0,0 +1,304 @@ +/* + * arch/sh/kernel/cpu/shmobile/pm_runtime.c + * + * Runtime PM support code for SuperH Mobile + * + * Copyright (C) 2009 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +static DEFINE_SPINLOCK(hwblk_lock); +static LIST_HEAD(hwblk_idle_list); +static struct work_struct hwblk_work; + +extern struct hwblk_info *hwblk_info; + +static void platform_pm_runtime_not_idle(struct platform_device *pdev) +{ + unsigned long flags; + + /* remove device from idle list */ + spin_lock_irqsave(&hwblk_lock, flags); + if (test_bit(PDEV_ARCHDATA_FLAG_IDLE, &pdev->archdata.flags)) { + list_del(&pdev->archdata.entry); + __clear_bit(PDEV_ARCHDATA_FLAG_IDLE, &pdev->archdata.flags); + } + spin_unlock_irqrestore(&hwblk_lock, flags); +} + +static int __platform_pm_runtime_resume(struct platform_device *pdev) +{ + struct device *d = &pdev->dev; + struct pdev_archdata *ad = &pdev->archdata; + int hwblk = ad->hwblk_id; + int ret = -ENOSYS; + + dev_dbg(d, "__platform_pm_runtime_resume() [%d]\n", hwblk); + + if (d->driver && d->driver->pm && d->driver->pm->runtime_resume) { + hwblk_enable(hwblk_info, hwblk); + ret = 0; + + if (test_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags)) { + ret = d->driver->pm->runtime_resume(d); + if (!ret) + clear_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags); + else + hwblk_disable(hwblk_info, hwblk); + } + } + + dev_dbg(d, "__platform_pm_runtime_resume() [%d] - returns %d\n", + hwblk, ret); + + return ret; +} + +static int __platform_pm_runtime_suspend(struct platform_device *pdev) +{ + struct device *d = &pdev->dev; + struct pdev_archdata *ad = &pdev->archdata; + int hwblk = ad->hwblk_id; + int ret = -ENOSYS; + + dev_dbg(d, "__platform_pm_runtime_suspend() [%d]\n", hwblk); + + if (d->driver && d->driver->pm && d->driver->pm->runtime_suspend) { + BUG_ON(!test_bit(PDEV_ARCHDATA_FLAG_IDLE, &ad->flags)); + + hwblk_enable(hwblk_info, hwblk); + ret = d->driver->pm->runtime_suspend(d); + hwblk_disable(hwblk_info, hwblk); + + if (!ret) { + set_bit(PDEV_ARCHDATA_FLAG_SUSP, &ad->flags); + platform_pm_runtime_not_idle(pdev); + hwblk_cnt_dec(hwblk_info, hwblk, HWBLK_CNT_IDLE); + } + } + + dev_dbg(d, "__platform_pm_runtime_suspend() [%d] - returns %d\n", + hwblk, ret); + + return ret; +} + +static void platform_pm_runtime_work(struct work_struct *work) +{ + struct platform_device *pdev; + unsigned long flags; + int ret; + + /* go through the idle list and suspend one device at a time */ + do { + spin_lock_irqsave(&hwblk_lock, flags); + if (list_empty(&hwblk_idle_list)) + pdev = NULL; + else + pdev = list_first_entry(&hwblk_idle_list, + struct platform_device, + archdata.entry); + spin_unlock_irqrestore(&hwblk_lock, flags); + + if (pdev) { + mutex_lock(&pdev->archdata.mutex); + ret = __platform_pm_runtime_suspend(pdev); + + /* at this point the platform device may be: + * suspended: ret = 0, FLAG_SUSP set, clock stopped + * failed: ret < 0, FLAG_IDLE set, clock stopped + */ + mutex_unlock(&pdev->archdata.mutex); + } else { + ret = -ENODEV; + } + } while (!ret); +} + +/* this function gets called from cpuidle context when all devices in the + * main power domain are unused but some are counted as idle, ie the hwblk + * counter values are (HWBLK_CNT_USAGE == 0) && (HWBLK_CNT_IDLE != 0) + */ +void platform_pm_runtime_suspend_idle(void) +{ + queue_work(pm_wq, &hwblk_work); +} + +int platform_pm_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct pdev_archdata *ad = &pdev->archdata; + unsigned long flags; + int hwblk = ad->hwblk_id; + int ret = 0; + + dev_dbg(dev, "platform_pm_runtime_suspend() [%d]\n", hwblk); + + /* ignore off-chip platform devices */ + if 
(!hwblk) + goto out; + + /* interrupt context not allowed */ + might_sleep(); + + /* catch misconfigured drivers not starting with resume */ + if (test_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags)) { + ret = -EINVAL; + goto out; + } + + /* serialize */ + mutex_lock(&ad->mutex); + + /* disable clock */ + hwblk_disable(hwblk_info, hwblk); + + /* put device on idle list */ + spin_lock_irqsave(&hwblk_lock, flags); + list_add_tail(&pdev->archdata.entry, &hwblk_idle_list); + __set_bit(PDEV_ARCHDATA_FLAG_IDLE, &pdev->archdata.flags); + spin_unlock_irqrestore(&hwblk_lock, flags); + + /* increase idle count */ + hwblk_cnt_inc(hwblk_info, hwblk, HWBLK_CNT_IDLE); + + /* at this point the platform device is: + * idle: ret = 0, FLAG_IDLE set, clock stopped + */ + mutex_unlock(&ad->mutex); + +out: + dev_dbg(dev, "platform_pm_runtime_suspend() [%d] returns %d\n", + hwblk, ret); + + return ret; +} + +int platform_pm_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct pdev_archdata *ad = &pdev->archdata; + int hwblk = ad->hwblk_id; + int ret = 0; + + dev_dbg(dev, "platform_pm_runtime_resume() [%d]\n", hwblk); + + /* ignore off-chip platform devices */ + if (!hwblk) + goto out; + + /* interrupt context not allowed */ + might_sleep(); + + /* serialize */ + mutex_lock(&ad->mutex); + + /* make sure device is removed from idle list */ + platform_pm_runtime_not_idle(pdev); + + /* decrease idle count */ + if (!test_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags) && + !test_bit(PDEV_ARCHDATA_FLAG_SUSP, &pdev->archdata.flags)) + hwblk_cnt_dec(hwblk_info, hwblk, HWBLK_CNT_IDLE); + + /* resume the device if needed */ + ret = __platform_pm_runtime_resume(pdev); + + /* the driver has been initialized now, so clear the init flag */ + clear_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); + + /* at this point the platform device may be: + * resumed: ret = 0, flags = 0, clock started + * failed: ret < 0, FLAG_SUSP set, clock stopped + */ + mutex_unlock(&ad->mutex); +out: + dev_dbg(dev, "platform_pm_runtime_resume() [%d] returns %d\n", + hwblk, ret); + + return ret; +} + +int platform_pm_runtime_idle(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + int hwblk = pdev->archdata.hwblk_id; + int ret = 0; + + dev_dbg(dev, "platform_pm_runtime_idle() [%d]\n", hwblk); + + /* ignore off-chip platform devices */ + if (!hwblk) + goto out; + + /* interrupt context not allowed, use pm_runtime_put()! 
*/ + might_sleep(); + + /* suspend synchronously to disable clocks immediately */ + ret = pm_runtime_suspend(dev); +out: + dev_dbg(dev, "platform_pm_runtime_idle() [%d] done!\n", hwblk); + return ret; +} + +static int __devinit platform_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct platform_device *pdev = to_platform_device(dev); + int hwblk = pdev->archdata.hwblk_id; + + /* ignore off-chip platform devices */ + if (!hwblk) + return 0; + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + INIT_LIST_HEAD(&pdev->archdata.entry); + mutex_init(&pdev->archdata.mutex); + /* platform devices without drivers should be disabled */ + hwblk_enable(hwblk_info, hwblk); + hwblk_disable(hwblk_info, hwblk); + /* make sure driver re-inits itself once */ + __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); + break; + /* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */ + case BUS_NOTIFY_BOUND_DRIVER: + /* keep track of number of devices in use per hwblk */ + hwblk_cnt_inc(hwblk_info, hwblk, HWBLK_CNT_DEVICES); + break; + case BUS_NOTIFY_UNBOUND_DRIVER: + /* keep track of number of devices in use per hwblk */ + hwblk_cnt_dec(hwblk_info, hwblk, HWBLK_CNT_DEVICES); + /* make sure driver re-inits itself once */ + __set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags); + break; + case BUS_NOTIFY_DEL_DEVICE: + break; + } + return 0; +} + +static struct notifier_block platform_bus_notifier = { + .notifier_call = platform_bus_notify +}; + +static int __init sh_pm_runtime_init(void) +{ + INIT_WORK(&hwblk_work, platform_pm_runtime_work); + + bus_register_notifier(&platform_bus_type, &platform_bus_notifier); + return 0; +} + +core_initcall(sh_pm_runtime_init); -- cgit v1.2.3-59-g8ed1b From a62926fe4b76d539b77ee53197040153d43916f6 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 18 Aug 2009 10:41:06 +0900 Subject: sh: Fix section mismatch in platform bus notifier. The runtime PM for SH-Mobile code had platform_bus_notify() as __devinit, which is rather bogus. Kill off the annotation, which subsequently silences the section mismatch warnings. Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/shmobile/pm_runtime.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c index d5f5285a2253..7c615b17e209 100644 --- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c +++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c @@ -251,8 +251,8 @@ out: return ret; } -static int __devinit platform_bus_notify(struct notifier_block *nb, - unsigned long action, void *data) +static int platform_bus_notify(struct notifier_block *nb, + unsigned long action, void *data) { struct device *dev = data; struct platform_device *pdev = to_platform_device(dev); @@ -300,5 +300,4 @@ static int __init sh_pm_runtime_init(void) bus_register_notifier(&platform_bus_type, &platform_bus_notifier); return 0; } - core_initcall(sh_pm_runtime_init); -- cgit v1.2.3-59-g8ed1b From cc58f597afc63a57bb55ed97c2a72f7405320c93 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Wed, 19 Aug 2009 10:39:05 +0000 Subject: sh: drop static UIO clocks for sh7722, sh7723 and sh7724 The Runtime PM patch for UIO driver implements coarse grained dynamic power management for UIO devices. With that patch in place we can get rid of the static clock configuration. Which in turn makes it possible for cpuidle to enter deeper sleep. 
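The practical consequence is that drivers for these hardware blocks must now manage their clock through runtime PM instead of relying on it being left enabled at boot. A minimal driver-side sketch under that assumption; veu_process() and its platform device are made up for illustration and are not part of this patch:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int veu_process(struct platform_device *pdev)
{
	int ret;

	/* clock on (and register state restored if the hwblk was down) */
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		return ret;

	/* ... access the VEU registers here ... */

	pm_runtime_put(&pdev->dev);	/* allow the clock to stop when idle */
	return 0;
}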
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/clock-sh7722.c | 6 +++--- arch/sh/kernel/cpu/sh4a/clock-sh7723.c | 6 +++--- arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c index 5b1bbbe63b1b..ea38b554dc05 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c @@ -164,11 +164,11 @@ static struct clk mstp_clks[] = { SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0), SH_HWBLK_CLK("siu0", -1, B_CLK, HWBLK_SIU, 0), SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0), - SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, 0), SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU, 0), SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU, 0), - SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU, CLK_ENABLE_ON_INIT), - SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU, 0), + SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, 0), SH_HWBLK_CLK("lcdc0", -1, P_CLK, HWBLK_LCDC, 0), }; diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c index e5c63911403c..20a31c2255a8 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c @@ -190,12 +190,12 @@ static struct clk mstp_clks[] = { SH_HWBLK_CLK("usb0", -1, B_CLK, HWBLK_USB, 0), SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0), SH_HWBLK_CLK("siu0", -1, B_CLK, HWBLK_SIU, 0), - SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU2H1, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU2H1, 0), SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0), SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU, 0), SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU, 0), - SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU2H0, CLK_ENABLE_ON_INIT), - SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU2H0, 0), + SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, 0), SH_HWBLK_CLK("lcdc0", -1, B_CLK, HWBLK_LCDC, 0), }; diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index 34611d97378e..a0c4ec25bcbc 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c @@ -204,17 +204,17 @@ static struct clk mstp_clks[] = { SH_HWBLK_CLK("2dg0", -1, B_CLK, HWBLK_2DG, 0), SH_HWBLK_CLK("sdhi0", -1, B_CLK, HWBLK_SDHI0, 0), SH_HWBLK_CLK("sdhi1", -1, B_CLK, HWBLK_SDHI1, 0), - SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU1, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("veu1", -1, B_CLK, HWBLK_VEU1, 0), SH_HWBLK_CLK("ceu1", -1, B_CLK, HWBLK_CEU1, 0), SH_HWBLK_CLK("beu1", -1, B_CLK, HWBLK_BEU1, 0), SH_HWBLK_CLK("2ddmac0", -1, SH_CLK, HWBLK_2DDMAC, 0), SH_HWBLK_CLK("spu0", -1, B_CLK, HWBLK_SPU, 0), - SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("jpu0", -1, B_CLK, HWBLK_JPU, 0), SH_HWBLK_CLK("vou0", -1, B_CLK, HWBLK_VOU, 0), SH_HWBLK_CLK("beu0", -1, B_CLK, HWBLK_BEU0, 0), SH_HWBLK_CLK("ceu0", -1, B_CLK, HWBLK_CEU0, 0), - SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU0, CLK_ENABLE_ON_INIT), - SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, CLK_ENABLE_ON_INIT), + SH_HWBLK_CLK("veu0", -1, B_CLK, HWBLK_VEU0, 0), + SH_HWBLK_CLK("vpu0", -1, B_CLK, HWBLK_VPU, 0), SH_HWBLK_CLK("lcdc0", -1, B_CLK, HWBLK_LCDC, 0), }; -- cgit v1.2.3-59-g8ed1b From 9a4af027a03e10e97f56081cd7dd1fda5282bd9c Mon Sep 17 00:00:00 2001 From: 
Andre Draszik Date: Mon, 24 Aug 2009 14:38:27 +0900 Subject: sh: ratelimit unaligned fixups This patch makes sure we see messages about unaligned access fixups every now and then. Else especially userspace apps suffering from bad programming won't ever be noticed... Signed-off by: Andre Draszik Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt --- arch/sh/kernel/traps_32.c | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 05a04b6df844..866c7c7a8236 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -359,13 +359,6 @@ static inline int handle_delayslot(struct pt_regs *regs, #define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4) #define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4) -/* - * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit - * opcodes.. - */ - -static int handle_unaligned_notify_count = 10; - int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, struct mem_access *ma) { @@ -375,15 +368,13 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, index = (instruction>>8)&15; /* 0x0F00 */ rm = regs->regs[index]; - /* shout about the first ten userspace fixups */ - if (user_mode(regs) && handle_unaligned_notify_count>0) { - handle_unaligned_notify_count--; - - printk(KERN_NOTICE "Fixing up unaligned userspace access " + /* shout about fixups */ + if (printk_ratelimit()) + printk(KERN_NOTICE "Fixing up unaligned %s access " "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", + user_mode(regs) ? "userspace" : "kernel", current->comm, task_pid_nr(current), (void *)regs->pc, instruction); - } ret = -EFAULT; switch (instruction&0xF000) { -- cgit v1.2.3-59-g8ed1b From 7436cde6b2ca71049051620c68c26522bb3403bf Mon Sep 17 00:00:00 2001 From: Andre Draszik Date: Mon, 24 Aug 2009 14:53:46 +0900 Subject: sh: Allow user control over misaligned fixup handling This patch brings the SH4 misaligned trap handler in line with what happens on ARM: Add a /proc/cpu/alignment which can be read from to get alignment trap statistics and written to to influence the behaviour of the alignment trap handling. The value to write is a bitfield, which has the following meaning: 1 warn, 2 fixup, 4 signal In addition, we add a /proc/cpu/kernel_alignment, to enable or disable warnings in case of kernel code causing alignment errors. Signed-off by: Andre Draszik Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt --- arch/sh/kernel/traps_32.c | 153 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 153 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 866c7c7a8236..0ad356d00ac8 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -44,6 +45,87 @@ #define TRAP_ILLEGAL_SLOT_INST 13 #endif +static unsigned long se_user; +static unsigned long se_sys; +static unsigned long se_skipped; +static unsigned long se_half; +static unsigned long se_word; +static unsigned long se_dword; +static unsigned long se_multi; +/* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not + valid! 
*/ +static int se_usermode = 3; +/* 0: no warning 1: print a warning message */ +static int se_kernmode_warn = 1; + +#ifdef CONFIG_PROC_FS +static const char *se_usermode_action[] = { + "ignored", + "warn", + "fixup", + "fixup+warn", + "signal", + "signal+warn" +}; + +static int +proc_alignment_read(char *page, char **start, off_t off, int count, int *eof, + void *data) +{ + char *p = page; + int len; + + p += sprintf(p, "User:\t\t%lu\n", se_user); + p += sprintf(p, "System:\t\t%lu\n", se_sys); + p += sprintf(p, "Skipped:\t%lu\n", se_skipped); + p += sprintf(p, "Half:\t\t%lu\n", se_half); + p += sprintf(p, "Word:\t\t%lu\n", se_word); + p += sprintf(p, "DWord:\t\t%lu\n", se_dword); + p += sprintf(p, "Multi:\t\t%lu\n", se_multi); + p += sprintf(p, "User faults:\t%i (%s)\n", se_usermode, + se_usermode_action[se_usermode]); + p += sprintf(p, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn, + se_kernmode_warn ? "+warn" : ""); + + len = (p - page) - off; + if (len < 0) + len = 0; + + *eof = (len <= count) ? 1 : 0; + *start = page + off; + + return len; +} + +static int proc_alignment_write(struct file *file, const char __user *buffer, + unsigned long count, void *data) +{ + char mode; + + if (count > 0) { + if (get_user(mode, buffer)) + return -EFAULT; + if (mode >= '0' && mode <= '5') + se_usermode = mode - '0'; + } + return count; +} + +static int proc_alignment_kern_write(struct file *file, const char __user *buffer, + unsigned long count, void *data) +{ + char mode; + + if (count > 0) { + if (get_user(mode, buffer)) + return -EFAULT; + if (mode >= '0' && mode <= '1') + se_kernmode_warn = mode - '0'; + } + return count; +} +#endif + static void dump_mem(const char *str, unsigned long bottom, unsigned long top) { unsigned long p; @@ -194,6 +276,13 @@ static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs, count = 1<<(instruction&3); + switch (count) { + case 1: se_half += 1; break; + case 2: se_word += 1; break; + case 4: se_dword += 1; break; + case 8: se_multi += 1; break; /* ??? */ + } + ret = -EFAULT; switch (instruction>>12) { case 0: /* mov.[bwl] to/from memory via r0+rn */ @@ -530,6 +619,27 @@ asmlinkage void do_address_error(struct pt_regs *regs, local_irq_enable(); + se_user += 1; + + /* shout about userspace fixups */ + if (se_usermode & 1) + printk(KERN_NOTICE "Unaligned userspace access " + "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", + current->comm, current->pid, (void *)regs->pc, + instruction); + + if (se_usermode & 2) + goto fixup; + + if (se_usermode & 4) + goto uspace_segv; + else { + /* ignore */ + trace_mark(kernel_arch_trap_exit, MARK_NOARGS); + return; + } + +fixup: /* bad PC is not something we can fix */ if (regs->pc & 1) { si_code = BUS_ADRALN; @@ -563,6 +673,14 @@ uspace_segv: info.si_addr = (void __user *)address; force_sig_info(SIGBUS, &info, current); } else { + se_sys += 1; + + if (se_kernmode_warn) + printk(KERN_NOTICE "Unaligned kernel access " + "on behalf of \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", + current->comm, current->pid, (void *)regs->pc, + instruction); + if (regs->pc & 1) die("unaligned program counter", regs, error_code); @@ -872,3 +990,38 @@ void dump_stack(void) show_stack(NULL, NULL); } EXPORT_SYMBOL(dump_stack); + +#ifdef CONFIG_PROC_FS +/* + * This needs to be done after sysctl_init, otherwise sys/ will be + * overwritten. Actually, this shouldn't be in sys/ at all since + * it isn't a sysctl, and it doesn't contain sysctl information. + * We now locate it in /proc/cpu/alignment instead. 
+ */ +static int __init alignment_init(void) +{ + struct proc_dir_entry *dir, *res; + + dir = proc_mkdir("cpu", NULL); + if (!dir) + return -ENOMEM; + + res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, dir); + if (!res) + return -ENOMEM; + + res->read_proc = proc_alignment_read; + res->write_proc = proc_alignment_write; + + res = create_proc_entry("kernel_alignment", S_IWUSR | S_IRUGO, dir); + if (!res) + return -ENOMEM; + + res->read_proc = proc_alignment_read; + res->write_proc = proc_alignment_kern_write; + + return 0; +} + +fs_initcall(alignment_init); +#endif -- cgit v1.2.3-59-g8ed1b From 5a0ab35e43a6e3c69893c0091fe6a78ea8b3e443 Mon Sep 17 00:00:00 2001 From: Andre Draszik Date: Mon, 24 Aug 2009 15:01:10 +0900 Subject: sh: cleanup of do_address_error() This patch fixes a few problems with the existing code in do_address_error(). a) the variable used to printk()d the offending instruction wasn't initialized correctly. This is a fix to bug 5727 b) behaviour for CONFIG_CPU_SH2A wasn't correct c) the 'ignore address error' behaviour didn't update the PC, causing an infinite loop. Signed-off-by: Andre Draszik Signed-off-by: Paul Mundt --- arch/sh/kernel/traps_32.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 0ad356d00ac8..c581dc31d92a 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -621,12 +621,21 @@ asmlinkage void do_address_error(struct pt_regs *regs, se_user += 1; +#ifndef CONFIG_CPU_SH2A + set_fs(USER_DS); + if (copy_from_user(&instruction, (u16 *)(regs->pc & ~1), 2)) { + set_fs(oldfs); + goto uspace_segv; + } + set_fs(oldfs); + /* shout about userspace fixups */ if (se_usermode & 1) printk(KERN_NOTICE "Unaligned userspace access " "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", current->comm, current->pid, (void *)regs->pc, instruction); +#endif if (se_usermode & 2) goto fixup; @@ -635,7 +644,7 @@ asmlinkage void do_address_error(struct pt_regs *regs, goto uspace_segv; else { /* ignore */ - trace_mark(kernel_arch_trap_exit, MARK_NOARGS); + regs->pc += instruction_size(instruction); return; } @@ -647,15 +656,6 @@ fixup: } set_fs(USER_DS); - if (copy_from_user(&instruction, (void __user *)(regs->pc), - sizeof(instruction))) { - /* Argh. Fault on the instruction itself. - This should never happen non-SMP - */ - set_fs(oldfs); - goto uspace_segv; - } - tmp = handle_unaligned_access(instruction, regs, &user_mem_access); set_fs(oldfs); -- cgit v1.2.3-59-g8ed1b From 2fc742f8d64c68b4a175a1dcb28351b112d63315 Mon Sep 17 00:00:00 2001 From: Carl Shaw Date: Mon, 24 Aug 2009 15:07:08 +0900 Subject: sh: Improve unwind info for signals GCC does not issue unwind information for function epilogues. Unfortunately we can catch a signal during an epilogue. The signal handler writes the current context and signal return code onto the stack overwriting previous contents. During unwinding, libgcc can try to restore registers from the stack and restores corrupted ones. This can lead to segmentation, misaligned access and sigbus faults. For example, consider the following code: mov.l r12,@-r15 mov.l r14,@-r15 sts.l pr,@-r15 mov r15,r14 mov r14, r15 lds.l @r15+, pr <<< SIGNAL HERE mov.l @r15+, r14 mov.l @r15+, r12 rts Unwind is aware that pr was pushed to stack in prolog, so tries to restore it. Unfortunately it restores the last word of the signal handler code placed on the stack by the kernel. 
This patch tries to avoid the problem by adding a guard region on the stack between where the function pushes data and where the signal handler pushes its return code. We probably don't see this problem often because exception handling unwinding in an epilogue only occurs due to a pthread cancel signal. Also the kernel signal stack handler alignment of 8 bytes could hide the occurance of this problem sometimes as the stack may not be trampled at a particular required word. This is not guaranteed to always work. It relies on a frame pointer existing for the function (so it can get the correct sp value) which is not always the case for the SH4. Modifications will also be made to libgcc for the case where there is no fp. Signed-off-by: Carl Shaw Signed-off-by: Paul Mundt --- arch/sh/kernel/signal_32.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index b5afbec1db59..6010750c90b4 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -40,6 +40,16 @@ struct fdpic_func_descriptor { unsigned long GOT; }; +/* + * The following define adds a 64 byte gap between the signal + * stack frame and previous contents of the stack. This allows + * frame unwinding in a function epilogue but only if a frame + * pointer is used in the function. This is necessary because + * current gcc compilers (<4.3) do not generate unwind info on + * SH for function epilogues. + */ +#define UNWINDGUARD 64 + /* * Atomically swap in the new signal mask, and wait for a signal. */ @@ -327,7 +337,7 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) sp = current->sas_ss_sp + current->sas_ss_size; } - return (void __user *)((sp - frame_size) & -8ul); + return (void __user *)((sp - (frame_size+UNWINDGUARD)) & -8ul); } /* These symbols are defined with the addresses in the vsyscall page. -- cgit v1.2.3-59-g8ed1b From 15444a8973dcfbd286b3e638cbadac2446a9271a Mon Sep 17 00:00:00 2001 From: David McKay Date: Mon, 24 Aug 2009 16:10:40 +0900 Subject: sh: Allow use of GENERIC_IOMAP The synopsys PCI cell used in the later STMicro chips requires code to be run in order to do IO cycles, rather than just memory mapping the IO space. Rather than extending the existing SH infrastructure to allow this, use the GENERIC_IOMAP implmentation to save re-inventing the wheel. This set of changes allows the SH to be built with GENERIC_IOMAP enabled, it just ifdef's out the functions provided by the GENERIC_IOMAP implementation, and provides a few required missing functions. 
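The practical upside is that a driver written against the iomap API does not care which backend is in play: with CONFIG_GENERIC_IOMAP the ioread/iowrite calls go through lib/iomap's cookie dispatch (so port accesses can end up in platform code that runs the required I/O cycles), and without it they collapse to the raw MMIO macros kept in asm/io.h above. A minimal sketch of such an access path, where the device, port base and register offsets are invented for illustration:

#include <linux/io.h>
#include <linux/ioport.h>

#define DEMO_PORT_BASE	0x300	/* hypothetical I/O port base */
#define DEMO_PORT_LEN	8
#define DEMO_REG_CTRL	0x0
#define DEMO_REG_DATA	0x4

static u16 demo_read_data(void)
{
	void __iomem *regs;
	u16 val;

	regs = ioport_map(DEMO_PORT_BASE, DEMO_PORT_LEN);
	if (!regs)
		return 0xffff;

	iowrite16(0x1, regs + DEMO_REG_CTRL);	/* kick off a transfer */
	val = ioread16(regs + DEMO_REG_DATA);	/* fetch the result */

	ioport_unmap(regs);
	return val;
}

Whether those two accesses become plain loads and stores or calls into helper code is decided entirely by the Kconfig selection, which is the point of allowing GENERIC_IOMAP here.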
Signed-off-by: David McKay Signed-off-by: Paul Mundt --- arch/sh/drivers/pci/pci.c | 4 ++++ arch/sh/include/asm/io.h | 10 ++++++++++ arch/sh/kernel/io.c | 4 ++++ 3 files changed, 18 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c index 9a1c423ad167..c481df639022 100644 --- a/arch/sh/drivers/pci/pci.c +++ b/arch/sh/drivers/pci/pci.c @@ -295,6 +295,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, vma->vm_page_prot); } +#ifndef CONFIG_GENERIC_IOMAP + static void __iomem *ioport_map_pci(struct pci_dev *dev, unsigned long port, unsigned int nr) { @@ -346,6 +348,8 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *addr) } EXPORT_SYMBOL(pci_iounmap); +#endif /* CONFIG_GENERIC_IOMAP */ + #ifdef CONFIG_HOTPLUG EXPORT_SYMBOL(pcibios_resource_to_bus); EXPORT_SYMBOL(pcibios_bus_to_resource); diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 25348141674b..aeecf432d48c 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -146,6 +146,7 @@ __BUILD_MEMORY_STRING(q, u64) #define readl_relaxed(a) readl(a) #define readq_relaxed(a) readq(a) +#ifndef CONFIG_GENERIC_IOMAP /* Simple MMIO */ #define ioread8(a) __raw_readb(a) #define ioread16(a) __raw_readw(a) @@ -166,6 +167,15 @@ __BUILD_MEMORY_STRING(q, u64) #define iowrite8_rep(a, s, c) __raw_writesb((a), (s), (c)) #define iowrite16_rep(a, s, c) __raw_writesw((a), (s), (c)) #define iowrite32_rep(a, s, c) __raw_writesl((a), (s), (c)) +#endif + +#define mmio_insb(p,d,c) __raw_readsb(p,d,c) +#define mmio_insw(p,d,c) __raw_readsw(p,d,c) +#define mmio_insl(p,d,c) __raw_readsl(p,d,c) + +#define mmio_outsb(p,s,c) __raw_writesb(p,s,c) +#define mmio_outsw(p,s,c) __raw_writesw(p,s,c) +#define mmio_outsl(p,s,c) __raw_writesl(p,s,c) /* synco on SH-4A, otherwise a nop */ #define mmiowb() wmb() diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c index 4f85fffaa557..d0ca9684b781 100644 --- a/arch/sh/kernel/io.c +++ b/arch/sh/kernel/io.c @@ -62,6 +62,8 @@ void memset_io(volatile void __iomem *dst, int c, unsigned long count) } EXPORT_SYMBOL(memset_io); +#ifndef CONFIG_GENERIC_IOMAP + void __iomem *ioport_map(unsigned long port, unsigned int nr) { void __iomem *ret; @@ -79,3 +81,5 @@ void ioport_unmap(void __iomem *addr) sh_mv.mv_ioport_unmap(addr); } EXPORT_SYMBOL(ioport_unmap); + +#endif /* CONFIG_GENERIC_IOMAP */ -- cgit v1.2.3-59-g8ed1b From 27a30f53bbb77fd7e2e0459197e23b63fe3b74d4 Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Mon, 24 Aug 2009 16:14:03 +0900 Subject: sh: kgdb: do not reload VBR while handling debugger breackpoint Save the VBR allowing GDB to dump full registers set but do not reload it as soon as the kgdb_handle_exception is invoked. 
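For reference, the half that stays is a plain read of VBR so the debugger can display it; the sketch below is illustrative rather than a quote of the stub, and the helper name is invented.

/* Illustrative only: let GDB see VBR without ever writing it back. */
static inline unsigned long sh_read_vbr(void)
{
	unsigned long vbr;

	__asm__ __volatile__("stc vbr, %0" : "=r" (vbr));
	return vbr;
}

The removed line was the inverse operation, "ldc %0, vbr", which would replace the vector base the running kernel still relies on for exception dispatch; dropping it keeps a register write-back from the debugger from redirecting every subsequent trap.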
Signed-off-by: Giuseppe Cavallaro Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt --- arch/sh/kernel/kgdb.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c index 305aad742aec..d29de7864f32 100644 --- a/arch/sh/kernel/kgdb.c +++ b/arch/sh/kernel/kgdb.c @@ -195,8 +195,6 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) regs->gbr = gdb_regs[GDB_GBR]; regs->mach = gdb_regs[GDB_MACH]; regs->macl = gdb_regs[GDB_MACL]; - - __asm__ __volatile__ ("ldc %0, vbr" : : "r" (gdb_regs[GDB_VBR])); } void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) -- cgit v1.2.3-59-g8ed1b From b46373e0d4b9f714ab757aae0c19c41fbcc73ef5 Mon Sep 17 00:00:00 2001 From: Jon Frosdick Date: Mon, 24 Aug 2009 16:20:44 +0900 Subject: sh: Use internal watchdog timer to perform reset This patches will trigger a reboot using the watchdog timer instead of double fault. Unlike the previous method, this one actually works in 32 bit mode. Reset should also be cleaner. Signed-off-by: Jon Frosdick Signed-off-by: Carl Shaw Signed-off-by: Paul Mundt --- arch/sh/kernel/process_32.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c index 9fee977f176b..0673c4746be3 100644 --- a/arch/sh/kernel/process_32.c +++ b/arch/sh/kernel/process_32.c @@ -32,15 +32,35 @@ #include #include #include +#include int ubc_usercnt = 0; +#ifdef CONFIG_32BIT +static void watchdog_trigger_immediate(void) +{ + sh_wdt_write_cnt(0xFF); + sh_wdt_write_csr(0xC2); +} + +void machine_restart(char * __unused) +{ + local_irq_disable(); + + /* Use watchdog timer to trigger reset */ + watchdog_trigger_immediate(); + + while (1) + cpu_sleep(); +} +#else void machine_restart(char * __unused) { /* SR.BL=1 and invoke address error to let CPU reset (manual reset) */ asm volatile("ldc %0, sr\n\t" "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001)); } +#endif void machine_halt(void) { -- cgit v1.2.3-59-g8ed1b From d724a9c9d572e092d1ce820463f082697487b874 Mon Sep 17 00:00:00 2001 From: Pawel Moll Date: Mon, 24 Aug 2009 16:25:38 +0900 Subject: sh: Allow for kernel command line concatenation. So far kernel command line arguments could be passed in by a bootloader or defined as CONFIG_CMDLINE, which completely overwriting the first one. This change allows a developer to declare selected kernel parameters in a kernel configuration (eg. project-specific defconfig), retaining possibility of passing others by a bootloader. The obvious examples of the first type are MTD partition or bigphysarea-like region definitions, while "debug" option or network configuration should be given by a bootloader or a JTAG boot script. Signed-off-by: Pawel Moll Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 27 +++++++++++++++++++++++---- arch/sh/kernel/setup.c | 6 +++++- 2 files changed, 28 insertions(+), 5 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 80b4f9a743a1..2f5352c06a0e 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -767,12 +767,31 @@ config UBC_WAKEUP If unsure, say N. -config CMDLINE_BOOL - bool "Default bootloader kernel arguments" +choice + prompt "Kernel command line" + optional + default CMDLINE_OVERWRITE + help + Setting this option allows the kernel command line arguments + to be set. 
+ +config CMDLINE_OVERWRITE + bool "Overwrite bootloader kernel arguments" + help + Given string will overwrite any arguments passed in by + a bootloader. + +config CMDLINE_EXTEND + bool "Extend bootloader kernel arguments" + help + Given string will be concatenated with arguments passed in + by a bootloader. + +endchoice config CMDLINE - string "Initial kernel command string" - depends on CMDLINE_BOOL + string "Kernel command line arguments string" + depends on CMDLINE_OVERWRITE || CMDLINE_EXTEND default "console=ttySC1,115200" endmenu diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 212e6bddaeb8..d13bbafb4e1b 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -404,10 +404,14 @@ void __init setup_arch(char **cmdline_p) if (!memory_end) memory_end = memory_start + __MEMORY_SIZE; -#ifdef CONFIG_CMDLINE_BOOL +#ifdef CONFIG_CMDLINE_OVERWRITE strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line)); #else strlcpy(command_line, COMMAND_LINE, sizeof(command_line)); +#ifdef CONFIG_CMDLINE_EXTEND + strlcat(command_line, " ", sizeof(command_line)); + strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line)); +#endif #endif /* Save unparsed command line copy for /proc/cmdline */ -- cgit v1.2.3-59-g8ed1b From fea966f7564205fcf5919af9bde031e753419c96 Mon Sep 17 00:00:00 2001 From: Stuart Menefy Date: Mon, 24 Aug 2009 17:09:53 +0900 Subject: sh: Remove implicit sign extension from assembler immediates The SH instruction set has several instructions which accept an 8 bit immediate operand. For logical instructions this operand is zero extended, for arithmetic instructions the operand is sign extended. After adding an option to the assembler to check this, it was found that several pieces of assembly code were assuming this behaviour, and in one case getting it wrong. So this patch explicitly sign extends any immediate operands, which makes it obvious what is happening, and fixes the one case which got it wrong. Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt --- arch/sh/boot/compressed/head_32.S | 2 +- arch/sh/include/asm/entry-macros.S | 2 +- arch/sh/kernel/cpu/sh3/entry.S | 2 +- arch/sh/kernel/entry-common.S | 5 +++-- arch/sh/lib/clear_page.S | 2 +- 5 files changed, 7 insertions(+), 6 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/boot/compressed/head_32.S b/arch/sh/boot/compressed/head_32.S index 06ac31f3be88..02a30935f0b9 100644 --- a/arch/sh/boot/compressed/head_32.S +++ b/arch/sh/boot/compressed/head_32.S @@ -22,7 +22,7 @@ startup: bt clear_bss sub r0, r2 mov.l bss_start_addr, r0 - mov #0xe0, r1 + mov #0xffffffe0, r1 and r1, r0 ! align cache line mov.l text_start_addr, r3 mov r0, r1 diff --git a/arch/sh/include/asm/entry-macros.S b/arch/sh/include/asm/entry-macros.S index 64fd0de24daf..cc43a55e1fcf 100644 --- a/arch/sh/include/asm/entry-macros.S +++ b/arch/sh/include/asm/entry-macros.S @@ -7,7 +7,7 @@ .endm .macro sti - mov #0xf0, r11 + mov #0xfffffff0, r11 extu.b r11, r11 not r11, r11 stc sr, r10 diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index 8c19e21847d7..9421ec715fd2 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -257,7 +257,7 @@ restore_all: ! ! Calculate new SR value mov k3, k2 ! original SR value - mov #0xf0, k1 + mov #0xfffffff0, k1 extu.b k1, k1 not k1, k1 and k1, k2 ! 
Mask original SR value diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index 700477601c6f..68d9223b145e 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -98,8 +98,9 @@ need_resched: mov #OFF_SR, r0 mov.l @(r0,r15), r0 ! get status register - and #0xf0, r0 ! interrupts off (exception path)? - cmp/eq #0xf0, r0 + shlr r0 + and #(0xf0>>1), r0 ! interrupts off (exception path)? + cmp/eq #(0xf0>>1), r0 bt noresched mov.l 3f, r0 jsr @r0 ! call preempt_schedule_irq diff --git a/arch/sh/lib/clear_page.S b/arch/sh/lib/clear_page.S index 8342bfbde64c..c92244d4ff9d 100644 --- a/arch/sh/lib/clear_page.S +++ b/arch/sh/lib/clear_page.S @@ -57,7 +57,7 @@ ENTRY(clear_page) ENTRY(__clear_user) ! mov #0, r0 - mov #0xe0, r1 ! 0xffffffe0 + mov #0xffffffe0, r1 ! ! r4..(r4+31)&~32 -------- not aligned [ Area 0 ] ! (r4+31)&~32..(r4+r5)&~32 -------- aligned [ Area 1 ] -- cgit v1.2.3-59-g8ed1b From 8af57f8b4c0ada9063b1cee9d81e3e59f04ce5a2 Mon Sep 17 00:00:00 2001 From: Stuart Menefy Date: Mon, 24 Aug 2009 17:26:39 +0900 Subject: sh: generic_in/outs{bwl} optimizations. After performing the port2addr conversion, and checking that the data is correctly aligned, simply call __raw_readsX/writesX. These have already been optimised. Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt --- arch/sh/kernel/io_generic.c | 50 ++++++++------------------------------------- 1 file changed, 8 insertions(+), 42 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c index 5a7f554d9ca1..4ff507239286 100644 --- a/arch/sh/kernel/io_generic.c +++ b/arch/sh/kernel/io_generic.c @@ -73,35 +73,19 @@ u32 generic_inl_p(unsigned long port) void generic_insb(unsigned long port, void *dst, unsigned long count) { - volatile u8 *port_addr; - u8 *buf = dst; - - port_addr = (volatile u8 __force *)__ioport_map(port, 1); - while (count--) - *buf++ = *port_addr; + __raw_readsb(__ioport_map(port, 1), dst, count); + dummy_read(); } void generic_insw(unsigned long port, void *dst, unsigned long count) { - volatile u16 *port_addr; - u16 *buf = dst; - - port_addr = (volatile u16 __force *)__ioport_map(port, 2); - while (count--) - *buf++ = *port_addr; - + __raw_readsw(__ioport_map(port, 2), dst, count); dummy_read(); } void generic_insl(unsigned long port, void *dst, unsigned long count) { - volatile u32 *port_addr; - u32 *buf = dst; - - port_addr = (volatile u32 __force *)__ioport_map(port, 4); - while (count--) - *buf++ = *port_addr; - + __raw_readsl(__ioport_map(port, 4), dst, count); dummy_read(); } @@ -145,37 +129,19 @@ void generic_outl_p(u32 b, unsigned long port) */ void generic_outsb(unsigned long port, const void *src, unsigned long count) { - volatile u8 *port_addr; - const u8 *buf = src; - - port_addr = (volatile u8 __force *)__ioport_map(port, 1); - - while (count--) - *port_addr = *buf++; + __raw_writesb(__ioport_map(port, 1), src, count); + dummy_read(); } void generic_outsw(unsigned long port, const void *src, unsigned long count) { - volatile u16 *port_addr; - const u16 *buf = src; - - port_addr = (volatile u16 __force *)__ioport_map(port, 2); - - while (count--) - *port_addr = *buf++; - + __raw_writesw(__ioport_map(port, 2), src, count); dummy_read(); } void generic_outsl(unsigned long port, const void *src, unsigned long count) { - volatile u32 *port_addr; - const u32 *buf = src; - - port_addr = (volatile u32 __force *)__ioport_map(port, 4); - while (count--) - *port_addr = *buf++; - + __raw_writesl(__ioport_map(port, 
4), src, count); dummy_read(); } -- cgit v1.2.3-59-g8ed1b From 5e9377ec6f84e5334e9347e84e77d34e9a089ca7 Mon Sep 17 00:00:00 2001 From: Stuart Menefy Date: Mon, 24 Aug 2009 17:35:07 +0900 Subject: sh: Optimise memcpy_to/fromio for SH4 Optimise memcpy_to/fromio. This is used extensivly by MTD, so is a worthwhile performance gain. The main savings come from not repeatedly calling readl/writel, and doing word instead of byte at a time transfers. Also using "movca.l" on SH4 gives a small performance win. Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt --- arch/sh/kernel/io.c | 93 +++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 72 insertions(+), 21 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c index d0ca9684b781..4770c241c679 100644 --- a/arch/sh/kernel/io.c +++ b/arch/sh/kernel/io.c @@ -1,12 +1,9 @@ /* - * linux/arch/sh/kernel/io.c + * arch/sh/kernel/io.c - Machine independent I/O functions. * - * Copyright (C) 2000 Stuart Menefy + * Copyright (C) 2000 - 2009 Stuart Menefy * Copyright (C) 2005 Paul Mundt * - * Provide real functions which expand to whatever the header file defined. - * Also definitions of machine independent IO functions. - * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. @@ -18,33 +15,87 @@ /* * Copy data from IO memory space to "real" memory space. - * This needs to be optimized. */ void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned long count) { - unsigned char *p = to; - while (count) { - count--; - *p = readb(from); - p++; - from++; - } + /* + * Would it be worthwhile doing byte and long transfers first + * to try and get aligned? + */ +#ifdef CONFIG_CPU_SH4 + if ((count >= 0x20) && + (((u32)to & 0x1f) == 0) && (((u32)from & 0x3) == 0)) { + int tmp2, tmp3, tmp4, tmp5, tmp6; + + __asm__ __volatile__( + "1: \n\t" + "mov.l @%7+, r0 \n\t" + "mov.l @%7+, %2 \n\t" + "movca.l r0, @%0 \n\t" + "mov.l @%7+, %3 \n\t" + "mov.l @%7+, %4 \n\t" + "mov.l @%7+, %5 \n\t" + "mov.l @%7+, %6 \n\t" + "mov.l @%7+, r7 \n\t" + "mov.l @%7+, r0 \n\t" + "mov.l %2, @(0x04,%0) \n\t" + "mov #0x20, %2 \n\t" + "mov.l %3, @(0x08,%0) \n\t" + "sub %2, %1 \n\t" + "mov.l %4, @(0x0c,%0) \n\t" + "cmp/hi %1, %2 ! T if 32 > count \n\t" + "mov.l %5, @(0x10,%0) \n\t" + "mov.l %6, @(0x14,%0) \n\t" + "mov.l r7, @(0x18,%0) \n\t" + "mov.l r0, @(0x1c,%0) \n\t" + "bf.s 1b \n\t" + " add #0x20, %0 \n\t" + : "=&r" (to), "=&r" (count), + "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4), + "=&r" (tmp5), "=&r" (tmp6), "=&r" (from) + : "7"(from), "0" (to), "1" (count) + : "r0", "r7", "t", "memory"); + } +#endif + + if ((((u32)to | (u32)from) & 0x3) == 0) { + for (; count > 3; count -= 4) { + *(u32 *)to = *(volatile u32 *)from; + to += 4; + from += 4; + } + } + + for (; count > 0; count--) { + *(u8 *)to = *(volatile u8 *)from; + to++; + from++; + } + + mb(); } EXPORT_SYMBOL(memcpy_fromio); /* * Copy data from "real" memory space to IO memory space. - * This needs to be optimized. 
*/ void memcpy_toio(volatile void __iomem *to, const void *from, unsigned long count) { - const unsigned char *p = from; - while (count) { - count--; - writeb(*p, to); - p++; - to++; - } + if ((((u32)to | (u32)from) & 0x3) == 0) { + for ( ; count > 3; count -= 4) { + *(volatile u32 *)to = *(u32 *)from; + to += 4; + from += 4; + } + } + + for (; count > 0; count--) { + *(volatile u8 *)to = *(u8 *)from; + to++; + from++; + } + + mb(); } EXPORT_SYMBOL(memcpy_toio); -- cgit v1.2.3-59-g8ed1b From 6d243dd37002bcee54841852ab1b8606fd457851 Mon Sep 17 00:00:00 2001 From: Stuart Menefy Date: Mon, 24 Aug 2009 18:16:56 +0900 Subject: sh: Add sys_cacheflush() call for SH CPUs. Adds a system call to allow user code to flush code from the cache. You can use instructions for the data side, but the iside can only be done by a flush ROM which really only works with a direct mapped cache. The later SH4's have 2 way Iside, so this call allows a portable way to flush the cache. Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt --- arch/sh/include/asm/Kbuild | 2 +- arch/sh/include/asm/cachectl.h | 12 ++++++++++++ arch/sh/include/asm/unistd_32.h | 2 +- arch/sh/include/asm/unistd_64.h | 2 +- arch/sh/kernel/sys_sh.c | 43 +++++++++++++++++++++++++++++++++++++++++ arch/sh/kernel/syscalls_32.S | 2 +- arch/sh/kernel/syscalls_64.S | 2 +- 7 files changed, 60 insertions(+), 5 deletions(-) create mode 100644 arch/sh/include/asm/cachectl.h (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild index 43910cdf78a5..e121c30f797d 100644 --- a/arch/sh/include/asm/Kbuild +++ b/arch/sh/include/asm/Kbuild @@ -1,6 +1,6 @@ include include/asm-generic/Kbuild.asm -header-y += cpu-features.h +header-y += cachectl.h cpu-features.h unifdef-y += unistd_32.h unifdef-y += unistd_64.h diff --git a/arch/sh/include/asm/cachectl.h b/arch/sh/include/asm/cachectl.h new file mode 100644 index 000000000000..305dd7082a66 --- /dev/null +++ b/arch/sh/include/asm/cachectl.h @@ -0,0 +1,12 @@ +#ifndef _SH_CACHECTL_H +#define _SH_CACHECTL_H + +/* Definitions for the cacheflush system call. 
*/ + +#define CACHEFLUSH_D_INVAL 0x1 /* invalidate (without write back) */ +#define CACHEFLUSH_D_WB 0x2 /* write back (without invalidate) */ +#define CACHEFLUSH_D_PURGE 0x3 /* writeback and invalidate */ + +#define CACHEFLUSH_I 0x4 + +#endif /* _SH_CACHECTL_H */ diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h index 61d6ad93d786..925dd40d9d55 100644 --- a/arch/sh/include/asm/unistd_32.h +++ b/arch/sh/include/asm/unistd_32.h @@ -132,7 +132,7 @@ #define __NR_clone 120 #define __NR_setdomainname 121 #define __NR_uname 122 -#define __NR_modify_ldt 123 +#define __NR_cacheflush 123 #define __NR_adjtimex 124 #define __NR_mprotect 125 #define __NR_sigprocmask 126 diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h index a751699afda3..2b84bc916bc5 100644 --- a/arch/sh/include/asm/unistd_64.h +++ b/arch/sh/include/asm/unistd_64.h @@ -137,7 +137,7 @@ #define __NR_clone 120 #define __NR_setdomainname 121 #define __NR_uname 122 -#define __NR_modify_ldt 123 +#define __NR_cacheflush 123 #define __NR_adjtimex 124 #define __NR_mprotect 125 #define __NR_sigprocmask 126 diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c index 90d00e47264d..ec65dd8842b1 100644 --- a/arch/sh/kernel/sys_sh.c +++ b/arch/sh/kernel/sys_sh.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include static inline long do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, @@ -179,6 +181,47 @@ asmlinkage int sys_ipc(uint call, int first, int second, return -EINVAL; } +/* sys_cacheflush -- flush (part of) the processor cache. */ +asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op) +{ + struct vm_area_struct *vma; + + if ((op < 0) || (op > (CACHEFLUSH_D_PURGE|CACHEFLUSH_I))) + return -EINVAL; + + /* + * Verify that the specified address region actually belongs + * to this process. 
+ */ + if (addr + len < addr) + return -EFAULT; + + down_read(¤t->mm->mmap_sem); + vma = find_vma (current->mm, addr); + if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) { + up_read(¤t->mm->mmap_sem); + return -EFAULT; + } + + switch (op & CACHEFLUSH_D_PURGE) { + case CACHEFLUSH_D_INVAL: + __flush_invalidate_region((void *)addr, len); + break; + case CACHEFLUSH_D_WB: + __flush_wback_region((void *)addr, len); + break; + case CACHEFLUSH_D_PURGE: + __flush_purge_region((void *)addr, len); + break; + } + + if (op & CACHEFLUSH_I) + flush_cache_all(); + + up_read(¤t->mm->mmap_sem); + return 0; +} + asmlinkage int sys_uname(struct old_utsname __user *name) { int err; diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S index f9e21fa2f592..16ba225ede89 100644 --- a/arch/sh/kernel/syscalls_32.S +++ b/arch/sh/kernel/syscalls_32.S @@ -139,7 +139,7 @@ ENTRY(sys_call_table) .long sys_clone /* 120 */ .long sys_setdomainname .long sys_newuname - .long sys_ni_syscall /* sys_modify_ldt */ + .long sys_cacheflush /* x86: sys_modify_ldt */ .long sys_adjtimex .long sys_mprotect /* 125 */ .long sys_sigprocmask diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S index bf420b616ae0..af6fb7410c21 100644 --- a/arch/sh/kernel/syscalls_64.S +++ b/arch/sh/kernel/syscalls_64.S @@ -143,7 +143,7 @@ sys_call_table: .long sys_clone /* 120 */ .long sys_setdomainname .long sys_newuname - .long sys_ni_syscall /* sys_modify_ldt */ + .long sys_cacheflush /* x86: sys_modify_ldt */ .long sys_adjtimex .long sys_mprotect /* 125 */ .long sys_sigprocmask -- cgit v1.2.3-59-g8ed1b From 6000fc4d6f3e55ad52cce8d76317187fe01af2aa Mon Sep 17 00:00:00 2001 From: Stuart Menefy Date: Mon, 24 Aug 2009 18:27:33 +0900 Subject: sh: Fixes some write posting issues in the interrupt handling for SH It is possible for the CPU to re-enable it's interrupt block bit before the write to the interrupt controller has actually masked out the external interupt at the controller. We get around this by reading back from the interrupt controller which will ensure the write has happened. 
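The read-back idiom generalises to any posted-write path between the CPU and an interrupt controller, so it is worth seeing in isolation; the register address and mask below are placeholders.

#include <linux/io.h>

static void mask_irq_source(void __iomem *reg, u16 bit)
{
	__raw_writew(__raw_readw(reg) & ~bit, reg);
	(void)__raw_readw(reg);	/* read back to flush write posting */
}

Only once the read has returned is the masking guaranteed to have reached the controller, which closes the window in which the CPU could unmask interrupts again while the device is still asserting the line.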
Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/irq/ipr.c | 1 + drivers/sh/intc.c | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c index 808d99a48efb..c1508a90fc6a 100644 --- a/arch/sh/kernel/cpu/irq/ipr.c +++ b/arch/sh/kernel/cpu/irq/ipr.c @@ -35,6 +35,7 @@ static void disable_ipr_irq(unsigned int irq) unsigned long addr = get_ipr_desc(irq)->ipr_offsets[p->ipr_idx]; /* Set the priority in IPR to 0 */ __raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr); + (void)__raw_readw(addr); /* Read back to flush write posting */ } static void enable_ipr_irq(unsigned int irq) diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c index 3dd231a643b5..4b1ca9d28353 100644 --- a/drivers/sh/intc.c +++ b/drivers/sh/intc.c @@ -77,7 +77,7 @@ static unsigned long ack_handle[NR_IRQS]; static inline struct intc_desc_int *get_intc_desc(unsigned int irq) { struct irq_chip *chip = get_irq_chip(irq); - return (void *)((char *)chip - offsetof(struct intc_desc_int, chip)); + return container_of(chip, struct intc_desc_int, chip); } static inline unsigned int set_field(unsigned int value, @@ -95,16 +95,19 @@ static inline unsigned int set_field(unsigned int value, static void write_8(unsigned long addr, unsigned long h, unsigned long data) { __raw_writeb(set_field(0, data, h), addr); + (void)__raw_readb(addr); /* Defeat write posting */ } static void write_16(unsigned long addr, unsigned long h, unsigned long data) { __raw_writew(set_field(0, data, h), addr); + (void)__raw_readw(addr); /* Defeat write posting */ } static void write_32(unsigned long addr, unsigned long h, unsigned long data) { __raw_writel(set_field(0, data, h), addr); + (void)__raw_readl(addr); /* Defeat write posting */ } static void modify_8(unsigned long addr, unsigned long h, unsigned long data) @@ -112,6 +115,7 @@ static void modify_8(unsigned long addr, unsigned long h, unsigned long data) unsigned long flags; local_irq_save(flags); __raw_writeb(set_field(__raw_readb(addr), data, h), addr); + (void)__raw_readb(addr); /* Defeat write posting */ local_irq_restore(flags); } @@ -120,6 +124,7 @@ static void modify_16(unsigned long addr, unsigned long h, unsigned long data) unsigned long flags; local_irq_save(flags); __raw_writew(set_field(__raw_readw(addr), data, h), addr); + (void)__raw_readw(addr); /* Defeat write posting */ local_irq_restore(flags); } @@ -128,6 +133,7 @@ static void modify_32(unsigned long addr, unsigned long h, unsigned long data) unsigned long flags; local_irq_save(flags); __raw_writel(set_field(__raw_readl(addr), data, h), addr); + (void)__raw_readl(addr); /* Defeat write posting */ local_irq_restore(flags); } -- cgit v1.2.3-59-g8ed1b From 788e6af37a4ace8721eda72e4abe66fe0f6b49fd Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Mon, 24 Aug 2009 18:59:09 +0900 Subject: sh: fix sys_cacheflush error checking sys_cacheflush should return with EINVAL if the cache parameter is not one of ICACHE, DCACHE or BCACHE. So, we need to include 0 in the first check. It also adds the three definitions above as wrapper of the existent macros. PS: ltp cacheflush01 test now passes. 
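Until a libc wrapper exists the new call is reached with syscall(2); the program below is a hypothetical userspace test, with the syscall number and flag values copied from the unistd_32.h and cachectl.h hunks above and everything else invented.

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_cacheflush
#define __NR_cacheflush	123	/* slot formerly reserved for modify_ldt */
#endif
#define ICACHE	0x4		/* CACHEFLUSH_I */
#define DCACHE	0x3		/* CACHEFLUSH_D_PURGE */
#define BCACHE	(ICACHE | DCACHE)

int main(void)
{
	size_t len = 4096, i;
	uint16_t *insn = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (insn == MAP_FAILED)
		return 1;

	for (i = 0; i < len / sizeof(*insn); i++)
		insn[i] = 0x0009;	/* SH "nop", purely as filler */

	/* write back the D-cache and invalidate the I-cache for the buffer */
	if (syscall(__NR_cacheflush, (unsigned long)insn, len, BCACHE) != 0) {
		perror("cacheflush");
		return 1;
	}

	printf("%p is now coherent for instruction fetch\n", (void *)insn);
	munmap(insn, len);
	return 0;
}

Passing 0, or a value with bits outside BCACHE set, is exactly what the tightened check rejects with -EINVAL, which is the sort of case the cacheflush01 test exercises.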
Signed-off-by: Giuseppe Cavallaro Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt --- arch/sh/include/asm/cachectl.h | 7 +++++++ arch/sh/kernel/sys_sh.c | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/cachectl.h b/arch/sh/include/asm/cachectl.h index 305dd7082a66..6ffb4b7a212e 100644 --- a/arch/sh/include/asm/cachectl.h +++ b/arch/sh/include/asm/cachectl.h @@ -9,4 +9,11 @@ #define CACHEFLUSH_I 0x4 +/* + * Options for cacheflush system call + */ +#define ICACHE CACHEFLUSH_I /* flush instruction cache */ +#define DCACHE CACHEFLUSH_D_PURGE /* writeback and flush data cache */ +#define BCACHE (ICACHE|DCACHE) /* flush both caches */ + #endif /* _SH_CACHECTL_H */ diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c index ec65dd8842b1..8aa5d1ceaf14 100644 --- a/arch/sh/kernel/sys_sh.c +++ b/arch/sh/kernel/sys_sh.c @@ -186,7 +186,7 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op) { struct vm_area_struct *vma; - if ((op < 0) || (op > (CACHEFLUSH_D_PURGE|CACHEFLUSH_I))) + if ((op <= 0) || (op > (CACHEFLUSH_D_PURGE|CACHEFLUSH_I))) return -EINVAL; /* -- cgit v1.2.3-59-g8ed1b From 05ecd5a1f76c183cca381705b3adb7d77c9a0439 Mon Sep 17 00:00:00 2001 From: Pawel Moll Date: Mon, 24 Aug 2009 19:52:38 +0900 Subject: sh: Simplify "multi-evt" interrupt handling. This patch changes the way in which "multi-evt" interrups are handled. The intc_evt2irq_table and related intc_evt2irq() have been removed and the "redirecting" handler is installed for the coupled interrupts. Thanks to that the do_IRQ() function don't have to use another level of indirection for all the interrupts... Signed-off-by: Pawel Moll Signed-off-by: Stuart Menefy Signed-off-by: Paul Mundt --- arch/sh/kernel/irq.c | 2 +- drivers/sh/intc.c | 54 ++++++++++++++++--------------------------------- include/linux/sh_intc.h | 1 - 3 files changed, 18 insertions(+), 39 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 278c68c60488..d1053392e287 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -114,7 +114,7 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs) #endif irq_enter(); - irq = irq_demux(intc_evt2irq(irq)); + irq = irq_demux(evt2irq(irq)); #ifdef CONFIG_IRQSTACKS curctx = (union irq_ctx *)current_thread_info(); diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c index 4b1ca9d28353..a9174ec72853 100644 --- a/drivers/sh/intc.c +++ b/drivers/sh/intc.c @@ -663,16 +663,9 @@ static unsigned int __init save_reg(struct intc_desc_int *d, return 0; } -static unsigned char *intc_evt2irq_table; - -unsigned int intc_evt2irq(unsigned int vector) +static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) { - unsigned int irq = evt2irq(vector); - - if (intc_evt2irq_table && intc_evt2irq_table[irq]) - irq = intc_evt2irq_table[irq]; - - return irq; + generic_handle_irq((unsigned int)get_irq_data(irq)); } void __init register_intc_controller(struct intc_desc *desc) @@ -745,34 +738,6 @@ void __init register_intc_controller(struct intc_desc *desc) BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ - /* keep the first vector only if same enum is used multiple times */ - for (i = 0; i < desc->nr_vectors; i++) { - struct intc_vect *vect = desc->vectors + i; - int first_irq = evt2irq(vect->vect); - - if (!vect->enum_id) - continue; - - for (k = i + 1; k < desc->nr_vectors; k++) { - struct intc_vect *vect2 = desc->vectors + k; - - if (vect->enum_id 
!= vect2->enum_id) - continue; - - vect2->enum_id = 0; - - if (!intc_evt2irq_table) - intc_evt2irq_table = kzalloc(NR_IRQS, GFP_NOWAIT); - - if (!intc_evt2irq_table) { - pr_warning("intc: cannot allocate evt2irq!\n"); - continue; - } - - intc_evt2irq_table[evt2irq(vect2->vect)] = first_irq; - } - } - /* register the vectors one by one */ for (i = 0; i < desc->nr_vectors; i++) { struct intc_vect *vect = desc->vectors + i; @@ -789,6 +754,21 @@ void __init register_intc_controller(struct intc_desc *desc) } intc_register_irq(desc, d, vect->enum_id, irq); + + for (k = i + 1; k < desc->nr_vectors; k++) { + struct intc_vect *vect2 = desc->vectors + k; + unsigned int irq2 = evt2irq(vect2->vect); + + if (vect->enum_id != vect2->enum_id) + continue; + + vect2->enum_id = 0; + + /* redirect this interrupts to the first one */ + set_irq_chip_and_handler_name(irq2, &d->chip, + intc_redirect_irq, "redirect"); + set_irq_data(irq2, (void *)irq); + } } } diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h index eb1423a0078d..68e212ff9dde 100644 --- a/include/linux/sh_intc.h +++ b/include/linux/sh_intc.h @@ -85,7 +85,6 @@ struct intc_desc symbol __initdata = { \ } #endif -unsigned int intc_evt2irq(unsigned int vector); void __init register_intc_controller(struct intc_desc *desc); int intc_set_priority(unsigned int irq, unsigned int prio); -- cgit v1.2.3-59-g8ed1b From f13327864f94c3a0e6acca923df537d20059639f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 24 Aug 2009 22:48:27 +0900 Subject: sh: Move the FTRACE_SYSCALL_MAX definition in to asm/ftrace.h. Needed by ftrace changes in -tip. Signed-off-by: Paul Mundt --- arch/sh/include/asm/ftrace.h | 1 + arch/sh/kernel/ftrace.c | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h index 7e0bcc4d4a96..3c2ede3690d7 100644 --- a/arch/sh/include/asm/ftrace.h +++ b/arch/sh/include/asm/ftrace.h @@ -4,6 +4,7 @@ #ifdef CONFIG_FUNCTION_TRACER #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ +#define FTRACE_SYSCALL_MAX (NR_syscalls - 1) #ifndef __ASSEMBLY__ extern void mcount(void); diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index 6647dfcb781d..a3dcc6d5d253 100644 --- a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c @@ -283,8 +283,6 @@ static struct syscall_metadata *find_syscall_meta(unsigned long *syscall) return NULL; } -#define FTRACE_SYSCALL_MAX (NR_syscalls - 1) - struct syscall_metadata *syscall_nr_to_meta(int nr) { if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0) -- cgit v1.2.3-59-g8ed1b From 0ceb4c3e3f1ccaf121851e33c3ea269b8ad0f219 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 25 Aug 2009 07:32:39 +0900 Subject: timekeeping: Fix up read_persistent_clock() breakage on sh The recent commit "timekeeping: Increase granularity of read_persistent_clock()" introduced read_persistent_clock() rework which inadvertently broke the sh conversion: arch/sh/kernel/time.c:45: error: passing argument 1 of 'rtc_sh_get_time' from incompatible pointer type distcc[13470] ERROR: compile arch/sh/kernel/time.c on sprygo/32 failed make[2]: *** [arch/sh/kernel/time.o] Error 1 This trivial fix gets it working again. 
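The breakage is a plain pointer-level slip: inside read_persistent_clock() the parameter ts already has type struct timespec *, so passing &ts hands rtc_sh_get_time() a struct timespec ** instead. Reduced to its essence, with the prototype simplified for illustration (in the tree rtc_sh_get_time is a function pointer):

#include <linux/time.h>

void rtc_sh_get_time(struct timespec *ts);	/* wants a timespec pointer */

void read_persistent_clock(struct timespec *ts)
{
	/* rtc_sh_get_time(&ts);  wrong: &ts is a struct timespec ** */
	rtc_sh_get_time(ts);	/* right: forward the caller's pointer */
}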
Signed-off-by: Paul Mundt Cc: Martin Schwidefsky LKML-Reference: <20090824223239.GB20832@linux-sh.org> Signed-off-by: Ingo Molnar --- arch/sh/kernel/time.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c index 3f4706aa975e..0e0e8581cf7a 100644 --- a/arch/sh/kernel/time.c +++ b/arch/sh/kernel/time.c @@ -41,7 +41,7 @@ int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time; #ifdef CONFIG_GENERIC_CMOS_UPDATE void read_persistent_clock(struct timespec *ts) { - rtc_sh_get_time(&ts); + rtc_sh_get_time(ts); } int update_persistent_clock(struct timespec now) -- cgit v1.2.3-59-g8ed1b From b37c7c66f08df66ba7a8269b6d1af949ef8dbd95 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Wed, 26 Aug 2009 10:49:44 +0000 Subject: sh: fix CPU_SH7723/7724 numbering bug Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4/probe.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c index 10e6795b56aa..afd3e73d5ad4 100644 --- a/arch/sh/kernel/cpu/sh4/probe.c +++ b/arch/sh/kernel/cpu/sh4/probe.c @@ -141,7 +141,7 @@ int __init detect_cpu_and_cache_system(void) case 0x300b: switch (prr) { case 0x20: - boot_cpu_data.type = CPU_SH7723; + boot_cpu_data.type = CPU_SH7724; boot_cpu_data.flags |= CPU_HAS_L2_CACHE; break; case 0x50: -- cgit v1.2.3-59-g8ed1b From 4aa5ac4ef44dd8c986241c54298abd7910b78b3f Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Fri, 28 Aug 2009 21:37:20 +0000 Subject: sh: Only shout about fixing up unexpected unaligned accesses Some unaligned accesses are completely expected. For example, the trapped_io code uses the unaligned access fixup code path so there's no need to warn about having to fixup the unaligned access. 
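Exercising these paths from userspace only takes a deliberately misaligned load: it raises the address-error trap, is fixed up, bumps the counters in /proc/cpu/alignment and, with warnings enabled, produces the ratelimited messages discussed in the earlier patches. A hypothetical test snippet (an optimiser is free to split the access, so treat it as illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16];
	volatile uint32_t *p = (volatile uint32_t *)(buf + 1);	/* odd address */
	uint32_t v;

	memset(buf, 0xa5, sizeof(buf));
	v = *p;		/* unaligned 32-bit load, trapped and fixed up on SH */

	printf("read 0x%08x; now check dmesg and /proc/cpu/alignment\n",
	       (unsigned int)v);
	return 0;
}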
Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/include/asm/system_32.h | 2 +- arch/sh/kernel/io_trapped.c | 3 ++- arch/sh/kernel/traps_32.c | 9 +++++---- 3 files changed, 8 insertions(+), 6 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h index 81e0d668eefd..d3ab269386bb 100644 --- a/arch/sh/include/asm/system_32.h +++ b/arch/sh/include/asm/system_32.h @@ -199,7 +199,7 @@ do { \ #endif int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, - struct mem_access *ma); + struct mem_access *ma, int); asmlinkage void do_address_error(struct pt_regs *regs, unsigned long writeaccess, diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c index e27a19e1f46e..69be603aa2d7 100644 --- a/arch/sh/kernel/io_trapped.c +++ b/arch/sh/kernel/io_trapped.c @@ -284,7 +284,8 @@ int handle_trapped_io(struct pt_regs *regs, unsigned long address) return 0; } - tmp = handle_unaligned_access(instruction, regs, &trapped_io_access); + tmp = handle_unaligned_access(instruction, regs, + &trapped_io_access, 1); set_fs(oldfs); return tmp == 0; } diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index c581dc31d92a..583ace55b0dc 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -449,7 +449,7 @@ static inline int handle_delayslot(struct pt_regs *regs, #define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4) int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, - struct mem_access *ma) + struct mem_access *ma, int expected) { u_int rm; int ret, index; @@ -458,7 +458,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, rm = regs->regs[index]; /* shout about fixups */ - if (printk_ratelimit()) + if (!expected && printk_ratelimit()) printk(KERN_NOTICE "Fixing up unaligned %s access " "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", user_mode(regs) ? "userspace" : "kernel", @@ -657,7 +657,7 @@ fixup: set_fs(USER_DS); tmp = handle_unaligned_access(instruction, regs, - &user_mem_access); + &user_mem_access, 0); set_fs(oldfs); if (tmp==0) @@ -694,7 +694,8 @@ uspace_segv: die("insn faulting in do_address_error", regs, 0); } - handle_unaligned_access(instruction, regs, &user_mem_access); + handle_unaligned_access(instruction, regs, + &user_mem_access, 0); set_fs(oldfs); } } -- cgit v1.2.3-59-g8ed1b From 2f6dafc5fcbf3fddce345c47da1f277a156fe22a Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 31 Aug 2009 13:47:06 +0900 Subject: sh: unwinder: Fix up uninitialized variable warnings on sh2a build. A couple of these popped up on the sh2a build, causing build failures. 
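Both hunks apply the same recipe for this class of warning: when gcc cannot prove that every path through a switch assigns the variable before it is read, giving it a defined value at the declaration is the least intrusive way to keep the build clean. Schematically, with invented helper names:

	unsigned long value = 0;	/* defined even if no case matches */

	switch (reg) {
	case 14:
		value = read_frame_pointer();	/* hypothetical */
		break;
	case 15:
		value = read_stack_pointer();	/* hypothetical */
		break;
	}

	return value;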
Signed-off-by: Paul Mundt --- arch/sh/include/asm/dwarf.h | 2 +- arch/sh/kernel/dwarf.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h index c367ed3373c5..ced6795891a6 100644 --- a/arch/sh/include/asm/dwarf.h +++ b/arch/sh/include/asm/dwarf.h @@ -200,7 +200,7 @@ */ static __always_inline unsigned long dwarf_read_arch_reg(unsigned int reg) { - unsigned long value; + unsigned long value = 0; switch (reg) { case 14: diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 700f7e0fd658..bc4d8d75332b 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -887,7 +887,7 @@ static int __init dwarf_unwinder_init(void) { u32 entry_type; void *p, *entry; - int count, err; + int count, err = 0; unsigned long len; unsigned int c_entries, f_entries; unsigned char *end; -- cgit v1.2.3-59-g8ed1b From 1e1030dccb1084c8a38976d3656aab1d50d762da Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 1 Sep 2009 17:38:32 +0900 Subject: sh: nmi_debug support. This implements support for NMI debugging that was shamelessly copied from the avr32 port. A bit of special magic is needed in the interrupt exception path given that the NMI exception handler is stubbed in to the regular exception handling table despite being reported in INTEVT. So we mangle the lookup and kick off an EXPEVT-style exception dispatch from the INTEVT path for exceptions that do_IRQ() has no chance of handling. As a result, we also drop the evt2irq() conversion from the do_IRQ() path and just do it in assembly. Signed-off-by: Paul Mundt --- Documentation/kernel-parameters.txt | 2 +- arch/sh/include/asm/kdebug.h | 1 + arch/sh/include/asm/system.h | 2 +- arch/sh/kernel/Makefile | 7 ++-- arch/sh/kernel/cpu/sh3/entry.S | 26 +++++++++++++ arch/sh/kernel/cpu/sh3/ex.S | 4 +- arch/sh/kernel/irq.c | 2 +- arch/sh/kernel/nmi_debug.c | 77 +++++++++++++++++++++++++++++++++++++ arch/sh/kernel/traps.c | 21 ++++++++++ 9 files changed, 133 insertions(+), 9 deletions(-) create mode 100644 arch/sh/kernel/nmi_debug.c (limited to 'arch/sh/kernel') diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 7936b801fe6a..76c355214dc3 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1514,7 +1514,7 @@ and is between 256 and 4096 characters. It is defined in the file of returning the full 64-bit number. The default is to return 64-bit inode numbers. - nmi_debug= [KNL,AVR32] Specify one or more actions to take + nmi_debug= [KNL,AVR32,SH] Specify one or more actions to take when a NMI is triggered. Format: [state][,regs][,debounce][,die] diff --git a/arch/sh/include/asm/kdebug.h b/arch/sh/include/asm/kdebug.h index 0b9f896f203c..985219f9759e 100644 --- a/arch/sh/include/asm/kdebug.h +++ b/arch/sh/include/asm/kdebug.h @@ -4,6 +4,7 @@ /* Grossly misnamed. 
*/ enum die_val { DIE_TRAP, + DIE_NMI, DIE_OOPS, }; diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h index 6b272238a46e..b5c5acdc8c0e 100644 --- a/arch/sh/include/asm/system.h +++ b/arch/sh/include/asm/system.h @@ -169,7 +169,7 @@ BUILD_TRAP_HANDLER(breakpoint); BUILD_TRAP_HANDLER(singlestep); BUILD_TRAP_HANDLER(fpu_error); BUILD_TRAP_HANDLER(fpu_state_restore); -BUILD_TRAP_HANDLER(unwinder); +BUILD_TRAP_HANDLER(nmi); #ifdef CONFIG_BUG extern void handle_BUG(struct pt_regs *); diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile index f37cf02ad9be..a2d0a40f3848 100644 --- a/arch/sh/kernel/Makefile +++ b/arch/sh/kernel/Makefile @@ -10,9 +10,10 @@ CFLAGS_REMOVE_ftrace.o = -pg endif obj-y := debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o \ - machvec.o process_$(BITS).o ptrace_$(BITS).o setup.o \ - signal_$(BITS).o sys_sh.o sys_sh$(BITS).o syscalls_$(BITS).o \ - time.o topology.o traps.o traps_$(BITS).o unwinder.o + machvec.o nmi_debug.o process_$(BITS).o ptrace_$(BITS).o \ + setup.o signal_$(BITS).o sys_sh.o sys_sh$(BITS).o \ + syscalls_$(BITS).o time.o topology.o traps.o \ + traps_$(BITS).o unwinder.o obj-y += cpu/ obj-$(CONFIG_VSYSCALL) += vsyscall/ diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index aebd33d18ff7..d1142d365925 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -532,7 +532,33 @@ ENTRY(handle_interrupt) mov.l 2f, r4 mov.l 3f, r9 mov.l @r4, r4 ! pass INTEVT vector as arg0 + + shlr2 r4 + shlr r4 + mov r4, r0 ! save vector->jmp table offset for later + + shlr2 r4 ! vector to IRQ# conversion + add #-0x10, r4 + + cmp/pz r4 ! is it a valid IRQ? + bt 10f + + /* + * We got here as a result of taking the INTEVT path for something + * that isn't a valid hard IRQ, therefore we bypass the do_IRQ() + * path and special case the event dispatch instead. This is the + * expected path for the NMI (and any other brilliantly implemented + * exception), which effectively wants regular exception dispatch + * but is unfortunately reported through INTEVT rather than + * EXPEVT. Grr. + */ + mov.l 6f, r9 + mov.l @(r0, r9), r9 jmp @r9 + mov r15, r8 ! trap handlers take saved regs in r8 + +10: + jmp @r9 ! Off to do_IRQ() we go. mov r15, r5 ! pass saved registers as arg1 ENTRY(exception_none) diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S index e5a0de39a2db..46610c35c232 100644 --- a/arch/sh/kernel/cpu/sh3/ex.S +++ b/arch/sh/kernel/cpu/sh3/ex.S @@ -48,9 +48,7 @@ ENTRY(exception_handling_table) .long system_call ! Unconditional Trap /* 160 */ .long exception_error ! reserved_instruction (filled by trap_init) /* 180 */ .long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/ -ENTRY(nmi_slot) - .long kgdb_handle_exception /* 1C0 */ ! Allow trap to debugger -ENTRY(user_break_point_trap) + .long nmi_trap_handler /* 1C0 */ ! 
Allow trap to debugger .long break_point_trap /* 1E0 */ /* diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index d1053392e287..60f8af4497c7 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -114,7 +114,7 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs) #endif irq_enter(); - irq = irq_demux(evt2irq(irq)); + irq = irq_demux(irq); #ifdef CONFIG_IRQSTACKS curctx = (union irq_ctx *)current_thread_info(); diff --git a/arch/sh/kernel/nmi_debug.c b/arch/sh/kernel/nmi_debug.c new file mode 100644 index 000000000000..ff0abbd1e652 --- /dev/null +++ b/arch/sh/kernel/nmi_debug.c @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2007 Atmel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include + +enum nmi_action { + NMI_SHOW_STATE = 1 << 0, + NMI_SHOW_REGS = 1 << 1, + NMI_DIE = 1 << 2, + NMI_DEBOUNCE = 1 << 3, +}; + +static unsigned long nmi_actions; + +static int nmi_debug_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + struct die_args *args = data; + + if (likely(val != DIE_NMI)) + return NOTIFY_DONE; + + if (nmi_actions & NMI_SHOW_STATE) + show_state(); + if (nmi_actions & NMI_SHOW_REGS) + show_regs(args->regs); + if (nmi_actions & NMI_DEBOUNCE) + mdelay(10); + if (nmi_actions & NMI_DIE) + return NOTIFY_BAD; + + return NOTIFY_OK; +} + +static struct notifier_block nmi_debug_nb = { + .notifier_call = nmi_debug_notify, +}; + +static int __init nmi_debug_setup(char *str) +{ + char *p, *sep; + + register_die_notifier(&nmi_debug_nb); + + if (*str != '=') + return 0; + + for (p = str + 1; *p; p = sep + 1) { + sep = strchr(p, ','); + if (sep) + *sep = 0; + if (strcmp(p, "state") == 0) + nmi_actions |= NMI_SHOW_STATE; + else if (strcmp(p, "regs") == 0) + nmi_actions |= NMI_SHOW_REGS; + else if (strcmp(p, "debounce") == 0) + nmi_actions |= NMI_DEBOUNCE; + else if (strcmp(p, "die") == 0) + nmi_actions |= NMI_DIE; + else + printk(KERN_WARNING "NMI: Unrecognized action `%s'\n", + p); + if (!sep) + break; + } + + return 0; +} +__setup("nmi_debug", nmi_debug_setup); diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index f69bd968fcca..a8396f36bd14 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -91,3 +92,23 @@ BUILD_TRAP_HANDLER(bug) force_sig(SIGTRAP, current); } + +BUILD_TRAP_HANDLER(nmi) +{ + TRAP_HANDLER_DECL; + + nmi_enter(); + + switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) { + case NOTIFY_OK: + case NOTIFY_STOP: + break; + case NOTIFY_BAD: + die("Fatal Non-Maskable Interrupt", regs, SIGINT); + default: + printk(KERN_ALERT "Got NMI, but nobody cared. Ignoring...\n"); + break; + } + + nmi_exit(); +} -- cgit v1.2.3-59-g8ed1b From ade315d83c1d53b3c6b820134cb16601351810fe Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 1 Sep 2009 17:45:35 +0900 Subject: sh: Kill off kgdb's magical NMI debouncing. The kgdb stub has traditionally tied in to the NMI slot, and manually handled debounce. Now that we have a generic way to do this instead, all of the stub-specific debounce silliness can be killed off. 
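With the stub-local flag gone, anything interested in NMIs listens on the die chain exactly as nmi_debug.c above does; a hedged sketch of such a listener, with the module name and message invented:

#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int demo_nmi_notify(struct notifier_block *self,
			   unsigned long val, void *data)
{
	if (val != DIE_NMI)
		return NOTIFY_DONE;

	printk(KERN_INFO "demo: NMI observed\n");
	return NOTIFY_OK;		/* NOTIFY_BAD would make it fatal */
}

static struct notifier_block demo_nmi_nb = {
	.notifier_call = demo_nmi_notify,
};

static int __init demo_nmi_init(void)
{
	return register_die_notifier(&demo_nmi_nb);
}
module_init(demo_nmi_init);

Debounce, register dumps and the fatal case then become boot-time policy, selected with something like nmi_debug=state,regs,debounce,die, rather than behaviour wired into the kgdb entry path.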
Signed-off-by: Paul Mundt --- arch/sh/include/asm/kgdb.h | 3 --- arch/sh/kernel/cpu/sh3/entry.S | 31 ------------------------------- arch/sh/kernel/kgdb.c | 2 -- 3 files changed, 36 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/include/asm/kgdb.h b/arch/sh/include/asm/kgdb.h index 72704ed725e5..4235e228d921 100644 --- a/arch/sh/include/asm/kgdb.h +++ b/arch/sh/include/asm/kgdb.h @@ -30,9 +30,6 @@ static inline void arch_kgdb_breakpoint(void) __asm__ __volatile__ ("trapa #0x3c\n"); } -/* State info */ -extern char in_nmi; /* Debounce flag to prevent NMI reentry*/ - #define BUFMAX 2048 #define CACHE_FLUSH_IS_SAFE 1 diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index d1142d365925..0151933e5253 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -53,10 +53,6 @@ * syscall # * */ -#if defined(CONFIG_KGDB) -NMI_VEC = 0x1c0 ! Must catch early for debounce -#endif - /* Offsets to the stack */ OFF_R0 = 0 /* Return value. New ABI also arg4 */ OFF_R1 = 4 /* New ABI: arg5 */ @@ -71,7 +67,6 @@ OFF_PC = (16*4) OFF_SR = (16*4+8) OFF_TRA = (16*4+6*4) - #define k0 r0 #define k1 r1 #define k2 r2 @@ -281,21 +276,12 @@ restore_all: 6: or k0, k2 ! Set the IMASK-bits ldc k2, ssr ! -#if defined(CONFIG_KGDB) - ! Clear in_nmi - mov.l 6f, k0 - mov #0, k1 - mov.b k1, @k0 -#endif mov k4, r15 rte nop .align 2 5: .long 0x00001000 ! DSP -#ifdef CONFIG_KGDB -6: .long in_nmi -#endif 7: .long 0x30000000 ! common exception handler @@ -487,23 +473,6 @@ ENTRY(save_low_regs) ! .balign 512,0,512 ENTRY(handle_interrupt) -#if defined(CONFIG_KGDB) - mov.l 2f, k2 - ! Debounce (filter nested NMI) - mov.l @k2, k0 - mov.l 9f, k1 - cmp/eq k1, k0 - bf 11f - mov.l 10f, k1 - tas.b @k1 - bt 11f - rte - nop - .align 2 -9: .long NMI_VEC -10: .long in_nmi -11: -#endif /* defined(CONFIG_KGDB) */ sts pr, k3 ! save original pr value in k3 mova exception_data, k0 diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c index d29de7864f32..3e532d0d4a5c 100644 --- a/arch/sh/kernel/kgdb.c +++ b/arch/sh/kernel/kgdb.c @@ -15,8 +15,6 @@ #include #include -char in_nmi = 0; /* Set during NMI to prevent re-entry */ - /* Macros for single step instruction identification */ #define OPCODE_BT(op) (((op) & 0xff00) == 0x8900) #define OPCODE_BF(op) (((op) & 0xff00) == 0x8b00) -- cgit v1.2.3-59-g8ed1b From 2fd5a021071ef54c503ab8d9894acae3eccf4f92 Mon Sep 17 00:00:00 2001 From: Rafael Ignacio Zurita Date: Mon, 14 Sep 2009 03:01:58 +0000 Subject: sh: clkfwk: remove bogus set_bus_parent() from SH7709. This fixes up broken clock re-parenting undertaken by the SH7709 clock framework code, which is currently in conflict with the legacy CPG framework. With this change in place, the legacy CPG ancestry is used, and we manage to avoid contending on the clock_list_sem mutex, which is already held under the legacy registration path, resulting in livelock. In order for SH7709 to fully support the varying clock modes, it needs to implement a more complete clock framework. After this change it is in sync with legacy CPG mode, which ends up being the default configuration for this CPU anyways. 
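The contention being avoided has a familiar shape; reconstructed schematically below (the names match the sh clock framework but the bodies are simplified, so read it as an illustration rather than the literal arch/sh/kernel/cpu/clock.c):

int clk_register(struct clk *clk)
{
	mutex_lock(&clock_list_sem);		/* held across ->init ...         */
	list_add(&clk->node, &clock_list);
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);		/* ... which was set_bus_parent() */
	mutex_unlock(&clock_list_sem);
	return 0;
}

static void set_bus_parent(struct clk *clk)
{
	struct clk *bus = clk_get(NULL, "bus_clk");	/* needs clock_list_sem too */

	clk->parent = bus;
	clk_put(bus);
}

With ->init running under clock_list_sem, the nested clk_get() waits on the mutex the registration path is already holding, hence the hang and the decision to simply let the legacy CPG ancestry stand.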
Signed-off-by: Rafael Ignacio Zurita Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/clock-sh7709.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7709.c b/arch/sh/kernel/cpu/sh3/clock-sh7709.c index fa30b6017730..e8749505bd2a 100644 --- a/arch/sh/kernel/cpu/sh3/clock-sh7709.c +++ b/arch/sh/kernel/cpu/sh3/clock-sh7709.c @@ -22,13 +22,6 @@ static int stc_multipliers[] = { 1, 2, 4, 8, 3, 6, 1, 1 }; static int ifc_divisors[] = { 1, 2, 4, 1, 3, 1, 1, 1 }; static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 }; -static void set_bus_parent(struct clk *clk) -{ - struct clk *bus_clk = clk_get(NULL, "bus_clk"); - clk->parent = bus_clk; - clk_put(bus_clk); -} - static void master_clk_init(struct clk *clk) { int frqcr = ctrl_inw(FRQCR); @@ -50,9 +43,6 @@ static unsigned long module_clk_recalc(struct clk *clk) } static struct clk_ops sh7709_module_clk_ops = { -#ifdef CLOCK_MODE_0_1_2_7 - .init = set_bus_parent, -#endif .recalc = module_clk_recalc, }; @@ -78,7 +68,6 @@ static unsigned long cpu_clk_recalc(struct clk *clk) } static struct clk_ops sh7709_cpu_clk_ops = { - .init = set_bus_parent, .recalc = cpu_clk_recalc, }; -- cgit v1.2.3-59-g8ed1b From 3639dfb57d39747a3069678237e8ab810525fcb0 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 14 Sep 2009 17:29:35 +0900 Subject: sh: multi-evt support for SH-X3 proto CPU. This adds support for multiple vectors per unique IRQ masking source on the SH-X3 proto CPU. Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/setup-shx3.c | 55 +++++++++++++----------------------- 1 file changed, 20 insertions(+), 35 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c index 07f078961c71..e848443deeb9 100644 --- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c +++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c @@ -268,11 +268,7 @@ enum { UNUSED = 0, /* interrupt sources */ - IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, - IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, - IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, - IRL_HHLL, IRL_HHLH, IRL_HHHL, - IRQ0, IRQ1, IRQ2, IRQ3, + IRL, IRQ0, IRQ1, IRQ2, IRQ3, HUDII, TMU0, TMU1, TMU2, TMU3, TMU4, TMU5, PCII0, PCII1, PCII2, PCII3, PCII4, @@ -287,10 +283,7 @@ enum { DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8, DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11, DMAC1_DMAE, IIC, VIN0, VIN1, VCORE0, ATAPI, - DTU0_TEND, DTU0_AE, DTU0_TMISS, - DTU1_TEND, DTU1_AE, DTU1_TMISS, - DTU2_TEND, DTU2_AE, DTU2_TMISS, - DTU3_TEND, DTU3_AE, DTU3_TMISS, + DTU0, DTU1, DTU2, DTU3, FE0, FE1, GPIO0, GPIO1, GPIO2, GPIO3, PAM, IRM, @@ -298,8 +291,8 @@ enum { INTICI4, INTICI5, INTICI6, INTICI7, /* interrupt groups */ - IRL, PCII56789, SCIF0, SCIF1, SCIF2, SCIF3, - DMAC0, DMAC1, DTU0, DTU1, DTU2, DTU3, + PCII56789, SCIF0, SCIF1, SCIF2, SCIF3, + DMAC0, DMAC1, }; static struct intc_vect vectors[] __initdata = { @@ -332,14 +325,14 @@ static struct intc_vect vectors[] __initdata = { INTC_VECT(IIC, 0xae0), INTC_VECT(VIN0, 0xb00), INTC_VECT(VIN1, 0xb20), INTC_VECT(VCORE0, 0xb00), INTC_VECT(ATAPI, 0xb60), - INTC_VECT(DTU0_TEND, 0xc00), INTC_VECT(DTU0_AE, 0xc20), - INTC_VECT(DTU0_TMISS, 0xc40), - INTC_VECT(DTU1_TEND, 0xc60), INTC_VECT(DTU1_AE, 0xc80), - INTC_VECT(DTU1_TMISS, 0xca0), - INTC_VECT(DTU2_TEND, 0xcc0), INTC_VECT(DTU2_AE, 0xce0), - INTC_VECT(DTU2_TMISS, 0xd00), - INTC_VECT(DTU3_TEND, 0xd20), INTC_VECT(DTU3_AE, 0xd40), - INTC_VECT(DTU3_TMISS, 0xd60), + INTC_VECT(DTU0, 0xc00), INTC_VECT(DTU0, 0xc20), + INTC_VECT(DTU0, 0xc40), + 
INTC_VECT(DTU1, 0xc60), INTC_VECT(DTU1, 0xc80), + INTC_VECT(DTU1, 0xca0), + INTC_VECT(DTU2, 0xcc0), INTC_VECT(DTU2, 0xce0), + INTC_VECT(DTU2, 0xd00), + INTC_VECT(DTU3, 0xd20), INTC_VECT(DTU3, 0xd40), + INTC_VECT(DTU3, 0xd60), INTC_VECT(FE0, 0xe00), INTC_VECT(FE1, 0xe20), INTC_VECT(GPIO0, 0xe40), INTC_VECT(GPIO1, 0xe60), INTC_VECT(GPIO2, 0xe80), INTC_VECT(GPIO3, 0xea0), @@ -351,10 +344,6 @@ static struct intc_vect vectors[] __initdata = { }; static struct intc_group groups[] __initdata = { - INTC_GROUP(IRL, IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, - IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, - IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, - IRL_HHLL, IRL_HHLH, IRL_HHHL), INTC_GROUP(PCII56789, PCII5, PCII6, PCII7, PCII8, PCII9), INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI), INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI), @@ -364,10 +353,6 @@ static struct intc_group groups[] __initdata = { DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE), INTC_GROUP(DMAC1, DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8, DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11), - INTC_GROUP(DTU0, DTU0_TEND, DTU0_AE, DTU0_TMISS), - INTC_GROUP(DTU1, DTU1_TEND, DTU1_AE, DTU1_TMISS), - INTC_GROUP(DTU2, DTU2_TEND, DTU2_AE, DTU2_TMISS), - INTC_GROUP(DTU3, DTU3_TEND, DTU3_AE, DTU3_TMISS), }; static struct intc_mask_reg mask_registers[] __initdata = { @@ -434,14 +419,14 @@ static DECLARE_INTC_DESC(intc_desc_irq, "shx3-irq", vectors_irq, groups, /* External interrupt pins in IRL mode */ static struct intc_vect vectors_irl[] __initdata = { - INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220), - INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260), - INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0), - INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0), - INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320), - INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360), - INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0), - INTC_VECT(IRL_HHHL, 0x3c0), + INTC_VECT(IRL, 0x200), INTC_VECT(IRL, 0x220), + INTC_VECT(IRL, 0x240), INTC_VECT(IRL, 0x260), + INTC_VECT(IRL, 0x280), INTC_VECT(IRL, 0x2a0), + INTC_VECT(IRL, 0x2c0), INTC_VECT(IRL, 0x2e0), + INTC_VECT(IRL, 0x300), INTC_VECT(IRL, 0x320), + INTC_VECT(IRL, 0x340), INTC_VECT(IRL, 0x360), + INTC_VECT(IRL, 0x380), INTC_VECT(IRL, 0x3a0), + INTC_VECT(IRL, 0x3c0), }; static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups, -- cgit v1.2.3-59-g8ed1b From 033eb0acddb71bbee32dce285256b3489b17ef7f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Tue, 15 Sep 2009 09:26:04 +0900 Subject: sh: cpufreq: Include CPU id in info messages. This tidies up the printks when running on SMP, and aids in debugging when certain cores are unable to be scaled. 
Signed-off-by: Paul Mundt --- arch/sh/kernel/cpufreq.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/cpufreq.c b/arch/sh/kernel/cpufreq.c index e0590ffebd73..dce4f3ff0932 100644 --- a/arch/sh/kernel/cpufreq.c +++ b/arch/sh/kernel/cpufreq.c @@ -82,7 +82,8 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) cpuclk = clk_get(NULL, "cpu_clk"); if (IS_ERR(cpuclk)) { - printk(KERN_ERR "cpufreq: couldn't get CPU clk\n"); + printk(KERN_ERR "cpufreq: couldn't get CPU#%d clk\n", + policy->cpu); return PTR_ERR(cpuclk); } @@ -95,22 +96,21 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->min = policy->cpuinfo.min_freq; policy->max = policy->cpuinfo.max_freq; - /* * Catch the cases where the clock framework hasn't been wired up * properly to support scaling. */ if (unlikely(policy->min == policy->max)) { printk(KERN_ERR "cpufreq: clock framework rate rounding " - "not supported on this CPU.\n"); + "not supported on CPU#%d.\n", policy->cpu); clk_put(cpuclk); return -EINVAL; } - printk(KERN_INFO "cpufreq: Frequencies - Minimum %u.%03u MHz, " + printk(KERN_INFO "cpufreq: CPU#%d Frequencies - Minimum %u.%03u MHz, " "Maximum %u.%03u MHz.\n", - policy->min / 1000, policy->min % 1000, + policy->cpu, policy->min / 1000, policy->min % 1000, policy->max / 1000, policy->max % 1000); return 0; -- cgit v1.2.3-59-g8ed1b From a74f7e0410da9da7942cf4f08bbdb0e4fc76f972 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 16 Sep 2009 14:30:34 +0900 Subject: sh: Wire up HAVE_SYSCALL_TRACEPOINTS. This is necessary to get ftrace syscall tracing working again.. a fairly trivial and mechanical change. The one benefit is that this can also be enabled on sh64, despite not having its own ftrace port. 
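Concretely, the wiring this selects amounts to the pattern sketched below, condensed from the ptrace_32.c hunks further down (the stripped include in the diff is presumably <trace/events/syscalls.h>, where trace_sys_enter()/trace_sys_exit() are defined; ptrace_64.c does the same using regs->regs[9]).

/*
 * Condensed sketch of the ptrace_32.c hunks below; the audit and
 * tracehook calls that surround these lines are elided here.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>	/* trace_sys_enter()/trace_sys_exit() */

asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/* ... existing tracehook/seccomp handling elided ... */

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[0]);

	/* ... audit_syscall_entry() elided ... */

	return ret;
}

asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
	/* ... audit_syscall_exit() elided ... */

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[0]);

	/* ... tracehook_report_syscall_exit() elided ... */
}

Beyond that the change is mechanical: TIF_SYSCALL_FTRACE is renamed to TIF_SYSCALL_TRACEPOINT in the thread work masks, and the arch selects HAVE_SYSCALL_TRACEPOINTS so the generic syscall tracing code can attach to these hooks.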
Signed-off-by: Paul Mundt --- arch/sh/Kconfig | 1 + arch/sh/include/asm/thread_info.h | 8 ++++---- arch/sh/kernel/ptrace_32.c | 15 ++++++--------- arch/sh/kernel/ptrace_64.c | 9 +++++++++ 4 files changed, 20 insertions(+), 13 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 2f5352c06a0e..4df3570fe511 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -20,6 +20,7 @@ config SUPERH select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_LZMA + select HAVE_SYSCALL_TRACEPOINTS select RTC_LIB select GENERIC_ATOMIC64 help diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index 5123bcaa8509..bdeb9d46d17d 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -116,7 +116,7 @@ extern void free_thread_info(struct thread_info *ti); #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SECCOMP 6 /* secure computing */ #define TIF_NOTIFY_RESUME 7 /* callback before returning to user */ -#define TIF_SYSCALL_FTRACE 8 /* for ftrace syscall instrumentation */ +#define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_MEMDIE 18 @@ -130,7 +130,7 @@ extern void free_thread_info(struct thread_info *ti); #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) -#define _TIF_SYSCALL_FTRACE (1 << TIF_SYSCALL_FTRACE) +#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_USEDFPU (1 << TIF_USEDFPU) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_FREEZE (1 << TIF_FREEZE) @@ -144,13 +144,13 @@ extern void free_thread_info(struct thread_info *ti); /* work to do in syscall trace */ #define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ - _TIF_SYSCALL_FTRACE) + _TIF_SYSCALL_TRACEPOINT) /* work to do on any return to u-space */ #define _TIF_ALLWORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SIGPENDING | \ _TIF_NEED_RESCHED | _TIF_SYSCALL_AUDIT | \ _TIF_SINGLESTEP | _TIF_RESTORE_SIGMASK | \ - _TIF_NOTIFY_RESUME | _TIF_SYSCALL_FTRACE) + _TIF_NOTIFY_RESUME | _TIF_SYSCALL_TRACEPOINT) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK (_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \ diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c index f4f58e438a26..9be35f348093 100644 --- a/arch/sh/kernel/ptrace_32.c +++ b/arch/sh/kernel/ptrace_32.c @@ -34,7 +34,8 @@ #include #include -#include +#define CREATE_TRACE_POINTS +#include /* * This routine will get a word off of the process kernel stack. 
@@ -461,10 +462,8 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) */ ret = -1L; -#ifdef CONFIG_FTRACE_SYSCALLS - if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) - ftrace_syscall_enter(regs); -#endif + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_enter(regs, regs->regs[0]); if (unlikely(current->audit_context)) audit_syscall_entry(audit_arch(), regs->regs[3], @@ -482,10 +481,8 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]), regs->regs[0]); -#ifdef CONFIG_FTRACE_SYSCALLS - if (unlikely(test_thread_flag(TIF_SYSCALL_FTRACE))) - ftrace_syscall_exit(regs); -#endif + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_exit(regs, regs->regs[0]); step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c index 695097438f02..952da83903da 100644 --- a/arch/sh/kernel/ptrace_64.c +++ b/arch/sh/kernel/ptrace_64.c @@ -40,6 +40,9 @@ #include #include +#define CREATE_TRACE_POINTS +#include + /* This mask defines the bits of the SR which the user is not allowed to change, which are everything except S, Q, M, PR, SZ, FR. */ #define SR_MASK (0xffff8cfd) @@ -438,6 +441,9 @@ asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs) */ ret = -1LL; + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_enter(regs, regs->regs[9]); + if (unlikely(current->audit_context)) audit_syscall_entry(audit_arch(), regs->regs[1], regs->regs[2], regs->regs[3], @@ -452,6 +458,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]), regs->regs[9]); + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_exit(regs, regs->regs[9]); + if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, 0); } -- cgit v1.2.3-59-g8ed1b From 480c646c32f368dfa2a076f7a120a7795e786e62 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 16 Sep 2009 15:06:39 +0900 Subject: sh: Kill off unused se_skipped in alignment trap notification code. Nothing is using this, resulting in a build error with certain configurations. Just kill it off. Signed-off-by: Paul Mundt --- arch/sh/kernel/traps_32.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/sh/kernel') diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c index 583ace55b0dc..6aba9af79eaf 100644 --- a/arch/sh/kernel/traps_32.c +++ b/arch/sh/kernel/traps_32.c @@ -47,7 +47,6 @@ static unsigned long se_user; static unsigned long se_sys; -static unsigned long se_skipped; static unsigned long se_half; static unsigned long se_word; static unsigned long se_dword; @@ -77,7 +76,6 @@ proc_alignment_read(char *page, char **start, off_t off, int count, int *eof, p += sprintf(p, "User:\t\t%lu\n", se_user); p += sprintf(p, "System:\t\t%lu\n", se_sys); - p += sprintf(p, "Skipped:\t%lu\n", se_skipped); p += sprintf(p, "Half:\t\t%lu\n", se_half); p += sprintf(p, "Word:\t\t%lu\n", se_word); p += sprintf(p, "DWord:\t\t%lu\n", se_dword); -- cgit v1.2.3-59-g8ed1b