From b1576fec7f4dd4657694fefc97fda4cf28ec68e9 Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Tue, 4 Feb 2014 16:04:35 +1100
Subject: powerpc: No need to use dot symbols when branching to a function

binutils is smart enough to know that a branch to a function
descriptor is actually a branch to the function's text address.

Alan tells me that binutils has been doing this for 9 years.

Signed-off-by: Anton Blanchard
---
 arch/powerpc/mm/hash_low_64.S | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/powerpc/mm/hash_low_64.S')

diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 1136d26a95ae..8bf7537a7f53 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -159,7 +159,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 BEGIN_FTR_SECTION
 	mr	r4,r30
 	mr	r5,r7
-	bl	.hash_page_do_lazy_icache
+	bl	hash_page_do_lazy_icache
 END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 
 	/* At this point, r3 contains new PP bits, save them in
@@ -471,7 +471,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 BEGIN_FTR_SECTION
 	mr	r4,r30
 	mr	r5,r7
-	bl	.hash_page_do_lazy_icache
+	bl	hash_page_do_lazy_icache
 END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 
 	/* At this point, r3 contains new PP bits, save them in
@@ -588,7 +588,7 @@ htab_inval_old_hpte:
 	li	r6,MMU_PAGE_64K		/* psize */
 	ld	r7,STK_PARAM(R9)(r1)	/* ssize */
 	ld	r8,STK_PARAM(R8)(r1)	/* local */
-	bl	.flush_hash_page
+	bl	flush_hash_page
 	/* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */
 	lis	r0,_PAGE_HPTE_SUB@h
 	ori	r0,r0,_PAGE_HPTE_SUB@l
@@ -812,7 +812,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 BEGIN_FTR_SECTION
 	mr	r4,r30
 	mr	r5,r7
-	bl	.hash_page_do_lazy_icache
+	bl	hash_page_do_lazy_icache
 END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
 
 	/* At this point, r3 contains new PP bits, save them in
--
cgit v1.2.3-59-g8ed1b
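[Editor's note on the commit above: under the 64-bit ELF ABIv1, a plain
function symbol names a three-doubleword function descriptor in the
.opd section (entry address, TOC pointer, environment pointer), while
the corresponding dot symbol names the code itself. Per the commit
message, binutils has long resolved a "bl" aimed at the descriptor
symbol to the entry address inside it, so the dot symbol in the branch
is redundant. The C sketch below is illustrative only; "struct
func_desc" and "function_entry" are invented names, not kernel or
binutils code.]

#include <stdio.h>

/* Assumed shape of an ABIv1 function descriptor, for illustration:
 * three doublewords, of which the first is the code address. */
struct func_desc {
	unsigned long entry;	/* address of the function's text */
	unsigned long toc;	/* TOC (r2) value the callee expects */
	unsigned long env;	/* environment pointer, unused by C */
};

/* C analogue of the linker's job: a branch written against the
 * descriptor really targets the text address stored inside it. */
static unsigned long function_entry(const struct func_desc *fd)
{
	return fd->entry;
}

int main(void)
{
	struct func_desc fd = { 0x1000, 0x2000, 0 };

	printf("text entry: 0x%lx\n", function_entry(&fd));
	return 0;
}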
From b86206e4c32cbe6ac3de1c6dc52c2d64bcf461cb Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Mon, 10 Mar 2014 09:44:22 +1100
Subject: powerpc: Fix branch patching code for ABIv2

The MMU hashtable and SLB branch patching code uses function
pointers for the update sites. This creates a difference between
ABIv1 and ABIv2 because we don't have function descriptors on
ABIv2.

Get rid of the function pointers and just point at the update
sites directly. This works on both ABIs.

Signed-off-by: Anton Blanchard
---
 arch/powerpc/mm/hash_low_64.S   | 36 ++++++++++++++++++++++++------------
 arch/powerpc/mm/hash_utils_64.c | 20 +++++++++-----------
 arch/powerpc/mm/slb.c           | 12 ++++++------
 arch/powerpc/mm/slb_low.S       | 12 ++++++++----
 4 files changed, 47 insertions(+), 33 deletions(-)

(limited to 'arch/powerpc/mm/hash_low_64.S')

diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 8bf7537a7f53..057cbbb4c576 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -201,7 +201,8 @@ htab_insert_pte:
 	li	r8,MMU_PAGE_4K		/* page size */
 	li	r9,MMU_PAGE_4K		/* actual page size */
 	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
-_GLOBAL(htab_call_hpte_insert1)
+.globl htab_call_hpte_insert1
+htab_call_hpte_insert1:
 	bl	.			/* Patched by htab_finish_init() */
 	cmpdi	0,r3,0
 	bge	htab_pte_insert_ok	/* Insertion successful */
@@ -225,7 +226,8 @@ _GLOBAL(htab_call_hpte_insert1)
 	li	r8,MMU_PAGE_4K		/* page size */
 	li	r9,MMU_PAGE_4K		/* actual page size */
 	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
-_GLOBAL(htab_call_hpte_insert2)
+.globl htab_call_hpte_insert2
+htab_call_hpte_insert2:
 	bl	.			/* Patched by htab_finish_init() */
 	cmpdi	0,r3,0
 	bge+	htab_pte_insert_ok	/* Insertion successful */
@@ -242,7 +244,8 @@ _GLOBAL(htab_call_hpte_insert2)
 2:	and	r0,r5,r27
 	rldicr	r3,r0,3,63-3	/* r0 = (hash & mask) << 3 */
 	/* Call ppc_md.hpte_remove */
-_GLOBAL(htab_call_hpte_remove)
+.globl htab_call_hpte_remove
+htab_call_hpte_remove:
 	bl	.			/* Patched by htab_finish_init() */
 
 	/* Try all again */
@@ -296,7 +299,8 @@ htab_modify_pte:
 	li	r7,MMU_PAGE_4K		/* actual page size */
 	ld	r8,STK_PARAM(R9)(r1)	/* segment size */
 	ld	r9,STK_PARAM(R8)(r1)	/* get "local" param */
-_GLOBAL(htab_call_hpte_updatepp)
+.globl htab_call_hpte_updatepp
+htab_call_hpte_updatepp:
 	bl	.			/* Patched by htab_finish_init() */
 
 	/* if we failed because typically the HPTE wasn't really here
@@ -526,7 +530,8 @@ htab_special_pfn:
 	li	r8,MMU_PAGE_4K		/* page size */
 	li	r9,MMU_PAGE_4K		/* actual page size */
 	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
-_GLOBAL(htab_call_hpte_insert1)
+.globl htab_call_hpte_insert1
+htab_call_hpte_insert1:
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
 	bge	htab_pte_insert_ok	/* Insertion successful */
@@ -554,7 +559,8 @@ _GLOBAL(htab_call_hpte_insert1)
 	li	r8,MMU_PAGE_4K		/* page size */
 	li	r9,MMU_PAGE_4K		/* actual page size */
 	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
-_GLOBAL(htab_call_hpte_insert2)
+.globl htab_call_hpte_insert2
+htab_call_hpte_insert2:
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
 	bge+	htab_pte_insert_ok	/* Insertion successful */
@@ -571,7 +577,8 @@ _GLOBAL(htab_call_hpte_insert2)
 2:	and	r0,r5,r27
 	rldicr	r3,r0,3,63-3	/* r0 = (hash & mask) << 3 */
 	/* Call ppc_md.hpte_remove */
-_GLOBAL(htab_call_hpte_remove)
+.globl htab_call_hpte_remove
+htab_call_hpte_remove:
 	bl	.			/* patched by htab_finish_init() */
 
 	/* Try all again */
@@ -660,7 +667,8 @@ htab_modify_pte:
 	li	r7,MMU_PAGE_4K		/* actual page size */
 	ld	r8,STK_PARAM(R9)(r1)	/* segment size */
 	ld	r9,STK_PARAM(R8)(r1)	/* get "local" param */
-_GLOBAL(htab_call_hpte_updatepp)
+.globl htab_call_hpte_updatepp
+htab_call_hpte_updatepp:
 	bl	.			/* patched by htab_finish_init() */
 
 	/* if we failed because typically the HPTE wasn't really here
@@ -857,7 +865,8 @@ ht64_insert_pte:
 	li	r8,MMU_PAGE_64K
 	li	r9,MMU_PAGE_64K		/* actual page size */
 	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
-_GLOBAL(ht64_call_hpte_insert1)
+.globl ht64_call_hpte_insert1
+ht64_call_hpte_insert1:
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
 	bge	ht64_pte_insert_ok	/* Insertion successful */
@@ -881,7 +890,8 @@ _GLOBAL(ht64_call_hpte_insert1)
 	li	r8,MMU_PAGE_64K
 	li	r9,MMU_PAGE_64K		/* actual page size */
 	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
-_GLOBAL(ht64_call_hpte_insert2)
+.globl ht64_call_hpte_insert2
+ht64_call_hpte_insert2:
 	bl	.			/* patched by htab_finish_init() */
 	cmpdi	0,r3,0
 	bge+	ht64_pte_insert_ok	/* Insertion successful */
@@ -898,7 +908,8 @@ _GLOBAL(ht64_call_hpte_insert2)
 2:	and	r0,r5,r27
 	rldicr	r3,r0,3,63-3	/* r0 = (hash & mask) << 3 */
 	/* Call ppc_md.hpte_remove */
-_GLOBAL(ht64_call_hpte_remove)
+.globl ht64_call_hpte_remove
+ht64_call_hpte_remove:
 	bl	.			/* patched by htab_finish_init() */
 
 	/* Try all again */
@@ -952,7 +963,8 @@ ht64_modify_pte:
 	li	r7,MMU_PAGE_64K		/* actual page size */
 	ld	r8,STK_PARAM(R9)(r1)	/* segment size */
 	ld	r9,STK_PARAM(R8)(r1)	/* get "local" param */
-_GLOBAL(ht64_call_hpte_updatepp)
+.globl ht64_call_hpte_updatepp
+ht64_call_hpte_updatepp:
 	bl	.			/* patched by htab_finish_init() */
 
 	/* if we failed because typically the HPTE wasn't really here
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 49fc935ee807..d685dff382c9 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -603,19 +603,18 @@ int remove_section_mapping(unsigned long start, unsigned long end)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
+extern u32 htab_call_hpte_insert1[];
+extern u32 htab_call_hpte_insert2[];
+extern u32 htab_call_hpte_remove[];
+extern u32 htab_call_hpte_updatepp[];
+extern u32 ht64_call_hpte_insert1[];
+extern u32 ht64_call_hpte_insert2[];
+extern u32 ht64_call_hpte_remove[];
+extern u32 ht64_call_hpte_updatepp[];
+
 static void __init htab_finish_init(void)
 {
-	extern unsigned int *htab_call_hpte_insert1;
-	extern unsigned int *htab_call_hpte_insert2;
-	extern unsigned int *htab_call_hpte_remove;
-	extern unsigned int *htab_call_hpte_updatepp;
-
 #ifdef CONFIG_PPC_HAS_HASH_64K
-	extern unsigned int *ht64_call_hpte_insert1;
-	extern unsigned int *ht64_call_hpte_insert2;
-	extern unsigned int *ht64_call_hpte_remove;
-	extern unsigned int *ht64_call_hpte_updatepp;
-
 	patch_branch(ht64_call_hpte_insert1,
 		ppc_function_entry(ppc_md.hpte_insert),
 		BRANCH_SET_LINK);
@@ -628,7 +627,6 @@ static void __init htab_finish_init(void)
 	patch_branch(ht64_call_hpte_updatepp,
 		ppc_function_entry(ppc_md.hpte_updatepp),
 		BRANCH_SET_LINK);
-
 #endif /* CONFIG_PPC_HAS_HASH_64K */
 
 	patch_branch(htab_call_hpte_insert1,
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 9d1d33cd2be5..4623366f82e9 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -256,10 +256,14 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
 	patch_instruction(insn_addr, insn);
 }
 
+extern u32 slb_compare_rr_to_size[];
+extern u32 slb_miss_kernel_load_linear[];
+extern u32 slb_miss_kernel_load_io[];
+extern u32 slb_compare_rr_to_size[];
+extern u32 slb_miss_kernel_load_vmemmap[];
+
 void slb_set_size(u16 size)
 {
-	extern unsigned int *slb_compare_rr_to_size;
-
 	if (mmu_slb_size == size)
 		return;
 
@@ -272,11 +276,7 @@ void slb_initialize(void)
 	unsigned long linear_llp, vmalloc_llp, io_llp;
 	unsigned long lflags, vflags;
 	static int slb_encoding_inited;
-	extern unsigned int *slb_miss_kernel_load_linear;
-	extern unsigned int *slb_miss_kernel_load_io;
-	extern unsigned int *slb_compare_rr_to_size;
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-	extern unsigned int *slb_miss_kernel_load_vmemmap;
 	unsigned long vmemmap_llp;
 #endif
 
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 17aa6dfceb34..28cffb68c2e1 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -59,7 +59,8 @@ _GLOBAL(slb_allocate_realmode)
 	/* Linear mapping encoding bits, the "li" instruction below will
 	 * be patched by the kernel at boot
 	 */
-_GLOBAL(slb_miss_kernel_load_linear)
+.globl slb_miss_kernel_load_linear
+slb_miss_kernel_load_linear:
 	li	r11,0
 	/*
 	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
@@ -79,7 +80,8 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	/* Check virtual memmap region. To be patched at kernel boot */
 	cmpldi	cr0,r9,0xf
 	bne	1f
-_GLOBAL(slb_miss_kernel_load_vmemmap)
+.globl slb_miss_kernel_load_vmemmap
+slb_miss_kernel_load_vmemmap:
 	li	r11,0
 	b	6f
 1:
@@ -95,7 +97,8 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
 	b	6f
 5:
 	/* IO mapping */
-	_GLOBAL(slb_miss_kernel_load_io)
+.globl slb_miss_kernel_load_io
+slb_miss_kernel_load_io:
 	li	r11,0
 6:
 	/*
@@ -250,7 +253,8 @@ slb_finish_load:
 7:	ld	r10,PACASTABRR(r13)
 	addi	r10,r10,1
 	/* This gets soft patched on boot. */
-_GLOBAL(slb_compare_rr_to_size)
+.globl slb_compare_rr_to_size
+slb_compare_rr_to_size:
 	cmpldi	r10,0
 
 	blt+	4f
--
cgit v1.2.3-59-g8ed1b
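[Editor's note on the second commit: each update site is assembled as
"bl ." (a branch-and-link to itself) under a plain .globl label. Since
the label now names the instruction word itself rather than a function
descriptor, declaring it as "extern u32 foo[]" and handing it to
patch_branch() with ppc_function_entry() works the same on ABIv1 and
ABIv2. The user-space sketch below models that boot-time rewrite under
stated assumptions: make_bl() and patch_branch_model() are invented
names, and the real patch_branch() additionally flushes the icache
after storing the instruction.]

#include <stdint.h>
#include <stdio.h>

/* Build a PowerPC I-form branch: primary opcode 18, a 24-bit signed
 * word offset in bits 6-29, AA=0 (PC-relative), LK=1 (record the
 * return address in the link register). */
static uint32_t make_bl(uintptr_t site, uintptr_t target)
{
	intptr_t offset = (intptr_t)(target - site);

	return (18u << 26) | ((uint32_t)offset & 0x03fffffcu) | 1u;
}

/* Stand-in for the kernel's patch_branch(): overwrite the "bl ."
 * placeholder at the update site with a branch to the real target. */
static void patch_branch_model(uint32_t *site, uintptr_t target)
{
	*site = make_bl((uintptr_t)site, target);
}

int main(void)
{
	uint32_t insn = (18u << 26) | 1u;	/* "bl ." encodes as 0x48000001 */

	patch_branch_model(&insn, (uintptr_t)&insn + 0x100);
	printf("patched insn: 0x%08x\n", (unsigned int)insn);	/* 0x48000101 */
	return 0;
}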