From 1d5cfcdff793e2f34ec61d902fa5ee0c7e4a2208 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 16 Feb 2010 21:43:38 +0900
Subject: sh: Kill off some superfluous legacy PMB special casing.

The __va()/__pa() offsets and the boot memory offsets are consistent for
all PMB users, so there is no need to special case these for legacy PMB.
Kill the special casing off and depend on CONFIG_PMB across the board.

This also fixes up yet another addressing bug for sh64.

Signed-off-by: Paul Mundt
---
 arch/sh/include/asm/page.h   | 7 +------
 arch/sh/kernel/vmlinux.lds.S | 7 +++----
 2 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 61e58105adc3..3accdc5ab122 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -127,12 +127,7 @@ typedef struct page *pgtable_t;
  * is not visible (it is part of the PMB mapping) and so needs to be
  * added or subtracted as required.
  */
-#if defined(CONFIG_PMB_LEGACY)
-/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */
-#define PMB_OFFSET (PAGE_OFFSET - PXSEG(__MEMORY_START))
-#define __pa(x) ((unsigned long)(x) - PMB_OFFSET)
-#define __va(x) ((void *)((unsigned long)(x) + PMB_OFFSET))
-#elif defined(CONFIG_32BIT)
+#ifdef CONFIG_PMB
 #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
 #else
diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 0e66c7b30e0f..7f8a709c3ada 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -14,11 +14,10 @@ OUTPUT_ARCH(sh)
 #include
 #include

-#if defined(CONFIG_29BIT) || defined(CONFIG_SUPERH64) || \
-    defined(CONFIG_PMB_LEGACY)
-    #define MEMORY_OFFSET __MEMORY_START
+#ifdef CONFIG_PMB
+    #define MEMORY_OFFSET 0
 #else
-    #define MEMORY_OFFSET 0
+    #define MEMORY_OFFSET __MEMORY_START
 #endif

 ENTRY(_start)
--
cgit v1.2.3-59-g8ed1b
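
For reference, a minimal standalone sketch of the arithmetic that the unified
CONFIG_PMB case in page.h performs. The PAGE_OFFSET and __MEMORY_START values
below are illustrative assumptions (a common sh layout with the kernel mapped
at 0x80000000 and RAM starting at 0x08000000), not values taken from this
patch; the helper names are likewise hypothetical.

	/* Sketch of the CONFIG_PMB __pa()/__va() arithmetic:
	 *   phys = virt - PAGE_OFFSET + __MEMORY_START
	 *   virt = phys + PAGE_OFFSET - __MEMORY_START
	 * EX_PAGE_OFFSET and EX_MEMORY_START are assumed example values.
	 */
	#include <stdio.h>

	#define EX_PAGE_OFFSET  0x80000000UL  /* assumed kernel virtual base */
	#define EX_MEMORY_START 0x08000000UL  /* assumed physical RAM start  */

	static unsigned long ex_pa(unsigned long virt)
	{
		return virt - EX_PAGE_OFFSET + EX_MEMORY_START;
	}

	static unsigned long ex_va(unsigned long phys)
	{
		return phys + EX_PAGE_OFFSET - EX_MEMORY_START;
	}

	int main(void)
	{
		unsigned long virt = 0x80001000UL;

		/* Round-trips back to the original virtual address. */
		printf("virt %#lx -> phys %#lx -> virt %#lx\n",
		       virt, ex_pa(virt), ex_va(ex_pa(virt)));
		return 0;
	}

Because the same offset pair applies to every PMB user, a single #ifdef
CONFIG_PMB branch covers what previously needed a separate CONFIG_PMB_LEGACY
case.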