Diffstat (limited to 'arch/powerpc/kernel')

-rw-r--r--  arch/powerpc/kernel/entry_64.S             | 41
-rw-r--r--  arch/powerpc/kernel/pci_64.c               |  2
-rw-r--r--  arch/powerpc/kernel/perf_event.c           | 17
-rw-r--r--  arch/powerpc/kernel/process.c              |  2
-rw-r--r--  arch/powerpc/kernel/setup_64.c             |  1
-rw-r--r--  arch/powerpc/kernel/vdso.c                 | 11
-rw-r--r--  arch/powerpc/kernel/vdso32/vdso32.lds.S    |  4

7 files changed, 46 insertions(+), 32 deletions(-)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index f9fd54bfcc84..9763267e38b4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -658,42 +658,43 @@ do_work:
cmpdi r0,0
crandc eq,cr1*4+eq,eq
bne restore
- /* here we are preempting the current task */
-1:
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl .trace_hardirqs_on
- /* Note: we just clobbered r10 which used to contain the previous
- * MSR before the hard-disabling done by the caller of do_work.
- * We don't have that value anymore, but it doesn't matter as
- * we will hard-enable unconditionally, we can just reload the
- * current MSR into r10
+
+ /* Here we are preempting the current task.
+ *
+ * Ensure interrupts are soft-disabled. We also properly mark
+ * the PACA to reflect the fact that they are hard-disabled
+ * and trace the change
*/
- mfmsr r10
-#endif /* CONFIG_TRACE_IRQFLAGS */
- li r0,1
+ li r0,0
stb r0,PACASOFTIRQEN(r13)
stb r0,PACAHARDIRQEN(r13)
+ TRACE_DISABLE_INTS
+
+ /* Call the scheduler with soft IRQs off */
+1: bl .preempt_schedule_irq
+
+ /* Hard-disable interrupts again (and update PACA) */
#ifdef CONFIG_PPC_BOOK3E
- wrteei 1
- bl .preempt_schedule
wrteei 0
#else
- ori r10,r10,MSR_EE
- mtmsrd r10,1 /* reenable interrupts */
- bl .preempt_schedule
mfmsr r10
- clrrdi r9,r1,THREAD_SHIFT
- rldicl r10,r10,48,1 /* disable interrupts again */
+ rldicl r10,r10,48,1
rotldi r10,r10,16
mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */
+ li r0,0
+ stb r0,PACAHARDIRQEN(r13)
+
+ /* Re-test the flags and loop again if needed */
+ clrrdi r9,r1,THREAD_SHIFT
ld r4,TI_FLAGS(r9)
andi. r0,r4,_TIF_NEED_RESCHED
bne 1b
b restore
user_work:
-#endif
+#endif /* CONFIG_PREEMPT */
+
/* Enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
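
The hunk above swaps the old enable-interrupts-then-preempt_schedule sequence for preempt_schedule_irq, which is entered with interrupts soft-disabled and handles enabling them itself around the actual task switch. Below is a minimal standalone C model of the resulting loop; every name in it is a stand-in for illustration, not a kernel API:

#include <stdio.h>

static int resched_pending = 2;         /* pretend two wakeups are queued */

/* Stand-in for the TIF_NEED_RESCHED test on the thread_info flags. */
static int need_resched(void)
{
        return resched_pending > 0;
}

/* Stand-in for preempt_schedule_irq(): called with IRQs off, may
 * enable them internally while switching tasks, returns with them off. */
static void preempt_schedule_irq_model(void)
{
        printf("scheduling with IRQs soft-disabled\n");
        resched_pending--;
}

int main(void)
{
        /* Mirror of "1: bl .preempt_schedule_irq ... bne 1b": schedule,
         * hard-disable interrupts again (wrteei 0 / mtmsrd in the
         * assembly above), then re-test the flag and loop if still set. */
        do {
                preempt_schedule_irq_model();
        } while (need_resched());
        return 0;
}
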
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index ba949a2c93ac..ccf56ac92de5 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -97,7 +97,9 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
* to do an appropriate TLB flush here too
*/
if (bus->self) {
+#ifdef CONFIG_PPC_STD_MMU_64
struct resource *res = bus->resource[0];
+#endif
pr_debug("IO unmapping for PCI-PCI bridge %s\n",
pci_name(bus->self));
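
The guard added above presumably exists because res is only consumed by code that is itself conditional on CONFIG_PPC_STD_MMU_64 (the hash-MMU TLB flush path the comment mentions); on other configurations the local would otherwise trigger an unused-variable warning. A generic sketch of the pattern, with illustrative names only:

#include <stdio.h>

#define CONFIG_FEATURE 1    /* flip to 0: the local disappears with its user */

static void do_work(void)
{
#if CONFIG_FEATURE
        int scratch = 42;   /* only needed by the guarded code below */
#endif
        printf("common path\n");
#if CONFIG_FEATURE
        printf("feature path: %d\n", scratch);
#endif
}

int main(void)
{
        do_work();
        return 0;
}
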
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index bbcbae183e92..87f1663584b0 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -116,20 +116,23 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
unsigned long mmcra = regs->dsisr;
+ unsigned long sihv = MMCRA_SIHV;
+ unsigned long sipr = MMCRA_SIPR;
if (TRAP(regs) != 0xf00)
return 0; /* not a PMU interrupt */
if (ppmu->flags & PPMU_ALT_SIPR) {
- if (mmcra & POWER6_MMCRA_SIHV)
- return PERF_RECORD_MISC_HYPERVISOR;
- return (mmcra & POWER6_MMCRA_SIPR) ?
- PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL;
+ sihv = POWER6_MMCRA_SIHV;
+ sipr = POWER6_MMCRA_SIPR;
}
- if (mmcra & MMCRA_SIHV)
+
+ /* PR has priority over HV, so order below is important */
+ if (mmcra & sipr)
+ return PERF_RECORD_MISC_USER;
+ if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV))
return PERF_RECORD_MISC_HYPERVISOR;
- return (mmcra & MMCRA_SIPR) ? PERF_RECORD_MISC_USER :
- PERF_RECORD_MISC_KERNEL;
+ return PERF_RECORD_MISC_KERNEL;
}
/*
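
The rewrite above first selects which SIHV/SIPR encodings apply (the POWER6 variants when PPMU_ALT_SIPR is set), then tests SIPR before SIHV: a sample taken in problem state must be reported as user level even if the hypervisor bit is also set. A standalone model of that priority, using made-up bit values rather than the real MMCRA encodings:

#include <assert.h>

enum misc { MISC_USER, MISC_KERNEL, MISC_HYPERVISOR };

#define SIPR 0x1UL      /* stand-in for MMCRA_SIPR (problem state) */
#define SIHV 0x2UL      /* stand-in for MMCRA_SIHV (hypervisor state) */

static enum misc classify(unsigned long mmcra, int hv_frozen)
{
        if (mmcra & SIPR)                   /* PR first: user wins */
                return MISC_USER;
        /* hv_frozen models the freeze_events_kernel != MMCR0_FCHV test */
        if ((mmcra & SIHV) && !hv_frozen)
                return MISC_HYPERVISOR;
        return MISC_KERNEL;
}

int main(void)
{
        assert(classify(SIPR | SIHV, 0) == MISC_USER);  /* PR beats HV */
        assert(classify(SIHV, 0) == MISC_HYPERVISOR);
        assert(classify(0, 0) == MISC_KERNEL);
        return 0;
}
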
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 2ec1eaed19ca..c930ac38e59f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1172,7 +1172,7 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
unsigned long base = mm->brk;
unsigned long ret;
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
/*
* If we are using 1TB segments and we are allowed to randomise
* the heap, we can put it above 1TB so it is backed by a 1TB
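
The config change above narrows the guard to the 64-bit hash MMU, which is where 1TB segments exist. A sketch of the placement idea from the comment; the helper and constants here are hypothetical, not the kernel's code:

#include <assert.h>

#define ONE_TB (1ULL << 40)

/* If the heap may be randomised and 1TB segments are in use, start the
 * brk at or above 1TB so the whole heap is backed by one 1TB segment,
 * rather than by the 256MB segments used below that boundary. */
static unsigned long long pick_brk_base(unsigned long long brk,
                                        int using_1tb_segments)
{
        if (using_1tb_segments && brk < ONE_TB)
                return ONE_TB;
        return brk;
}

int main(void)
{
        assert(pick_brk_base(1ULL << 30, 1) == ONE_TB);     /* lifted above 1TB */
        assert(pick_brk_base(1ULL << 30, 0) == 1ULL << 30); /* left alone */
        return 0;
}
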
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 797ea95aae2e..04f638d82fb3 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -57,7 +57,6 @@
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/mmu-hash64.h>
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 94e2df3cae07..137dc22afa42 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -50,6 +50,9 @@
/* Max supported size for symbol names */
#define MAX_SYMNAME 64
+/* The alignment of the vDSO */
+#define VDSO_ALIGNMENT (1 << 16)
+
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
@@ -231,15 +234,21 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
* pick a base address for the vDSO in process space. We try to put it
* at vdso_base which is the "natural" base for it, but we might fail
* and end up putting it elsewhere.
+ * Add enough to the size so that the result can be aligned.
*/
down_write(&mm->mmap_sem);
vdso_base = get_unmapped_area(NULL, vdso_base,
- vdso_pages << PAGE_SHIFT, 0, 0);
+ (vdso_pages << PAGE_SHIFT) +
+ ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+ 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
rc = vdso_base;
goto fail_mmapsem;
}
+ /* Add required alignment. */
+ vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
+
/*
* Put vDSO base into mm struct. We need to do this before calling
* install_special_mapping or the perf counter mmap tracking code
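
The vdso.c hunk uses a standard over-allocate-then-round trick: get_unmapped_area only guarantees page alignment, so the request is padded by (VDSO_ALIGNMENT - 1) & PAGE_MASK bytes, which is enough to round any page-aligned result up to a 64KB boundary without leaving the reservation. A standalone check of that bound, assuming 4KB pages:

#include <assert.h>

#define PAGE_SIZE   4096UL
#define PAGE_MASK   (~(PAGE_SIZE - 1))
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long align = 1UL << 16;                /* VDSO_ALIGNMENT */
        unsigned long slack = (align - 1) & PAGE_MASK;  /* extra bytes mapped */
        unsigned long off;

        /* Whatever page-aligned offset the allocator returns, rounding
         * up to `align` consumes at most `slack` bytes, so the aligned
         * vDSO still fits inside the enlarged mapping. */
        for (off = 0; off < align; off += PAGE_SIZE)
                assert(ALIGN(off, align) - off <= slack);
        return 0;
}
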
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 904ef1360dd7..0546bcd49cd0 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -25,7 +25,7 @@ SECTIONS
. = ALIGN(16);
.text : {
*(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
- }
+ } :text
PROVIDE(__etext = .);
PROVIDE(_etext = .);
PROVIDE(etext = .);
@@ -56,7 +56,7 @@ SECTIONS
.fixup : { *(.fixup) }
.dynamic : { *(.dynamic) } :text :dynamic
- .got : { *(.got) }
+ .got : { *(.got) } :text
.plt : { *(.plt) }
_end = .;