Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile         |   2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c    |   4
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c      |  10
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c    |   4
-rw-r--r--  arch/powerpc/kernel/dma.c            |  10
-rw-r--r--  arch/powerpc/kernel/entry_32.S       |  39
-rw-r--r--  arch/powerpc/kernel/entry_64.S       |  92
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S |   6
-rw-r--r--  arch/powerpc/kernel/fadump.c         |   2
-rw-r--r--  arch/powerpc/kernel/head_44x.S       |   8
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S |   8
-rw-r--r--  arch/powerpc/kernel/ibmebus.c        |  10
-rw-r--r--  arch/powerpc/kernel/idle.c           |  23
-rw-r--r--  arch/powerpc/kernel/init_task.c      |  29
-rw-r--r--  arch/powerpc/kernel/irq.c            |  35
-rw-r--r--  arch/powerpc/kernel/kgdb.c           |   1
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c  |   7
-rw-r--r--  arch/powerpc/kernel/misc_32.S        | 203
-rw-r--r--  arch/powerpc/kernel/paca.c           |   3
-rw-r--r--  arch/powerpc/kernel/process.c        |  56
-rw-r--r--  arch/powerpc/kernel/prom_init.c      |  17
-rw-r--r--  arch/powerpc/kernel/ptrace.c         |  44
-rw-r--r--  arch/powerpc/kernel/ptrace32.c       |  32
-rw-r--r--  arch/powerpc/kernel/setup_32.c       |   3
-rw-r--r--  arch/powerpc/kernel/smp.c            |  76
-rw-r--r--  arch/powerpc/kernel/traps.c          |  10
-rw-r--r--  arch/powerpc/kernel/vector.S         |  10
-rw-r--r--  arch/powerpc/kernel/vio.c            | 287
28 files changed, 607 insertions, 424 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index f5808a35688c..83afacd3ba7b 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -28,7 +28,7 @@ endif
obj-y := cputable.o ptrace.o syscalls.o \
irq.o align.o signal_32.o pmc.o vdso.o \
- init_task.o process.o systbl.o idle.o \
+ process.o systbl.o idle.o \
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o dma.o \
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 34b8afe94a50..4554dc2fe857 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -188,10 +188,6 @@ int main(void)
DEFINE(SLBSHADOW_STACKESID,
offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid));
DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
- DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
- DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
- DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
- DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count));
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 3f6464b4d970..bcfdcd22c766 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -17,7 +17,8 @@
* to the dma address (mapping) of the first page.
*/
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
dma_handle, dev->coherent_dma_mask, flag,
@@ -25,7 +26,8 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
}
static void dma_iommu_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}
@@ -105,8 +107,8 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
}
struct dma_map_ops dma_iommu_ops = {
- .alloc_coherent = dma_iommu_alloc_coherent,
- .free_coherent = dma_iommu_free_coherent,
+ .alloc = dma_iommu_alloc_coherent,
+ .free = dma_iommu_free_coherent,
.map_sg = dma_iommu_map_sg,
.unmap_sg = dma_iommu_unmap_sg,
.dma_supported = dma_iommu_dma_supported,
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 1ebc9189aada..4ab88dafb235 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -47,8 +47,8 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
* for everything else.
*/
struct dma_map_ops swiotlb_dma_ops = {
- .alloc_coherent = dma_direct_alloc_coherent,
- .free_coherent = dma_direct_free_coherent,
+ .alloc = dma_direct_alloc_coherent,
+ .free = dma_direct_free_coherent,
.map_sg = swiotlb_map_sg_attrs,
.unmap_sg = swiotlb_unmap_sg_attrs,
.dma_supported = swiotlb_dma_supported,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 7d0233c12ee3..b1ec983dcec8 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -26,7 +26,8 @@
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
@@ -54,7 +55,8 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
}
void dma_direct_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
__dma_free_coherent(size, vaddr);
@@ -150,8 +152,8 @@ static inline void dma_direct_sync_single(struct device *dev,
#endif
struct dma_map_ops dma_direct_ops = {
- .alloc_coherent = dma_direct_alloc_coherent,
- .free_coherent = dma_direct_free_coherent,
+ .alloc = dma_direct_alloc_coherent,
+ .free = dma_direct_free_coherent,
.map_sg = dma_direct_map_sg,
.unmap_sg = dma_direct_unmap_sg,
.dma_supported = dma_direct_dma_supported,
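
These hunks (and the matching ibmebus.c and vio.c ones below) are part of the tree-wide dma_map_ops rename: .alloc_coherent/.free_coherent become .alloc/.free and gain a struct dma_attrs argument. A minimal sketch of how a caller reaches the renamed hook (example_alloc() is hypothetical; passing a NULL attrs keeps the old coherent-allocation semantics):

        #include <linux/dma-mapping.h>

        /* Hypothetical helper: allocate a coherent buffer through the
         * device's dma_map_ops, the way generic code does after this
         * rename. */
        static void *example_alloc(struct device *dev, size_t size,
                                   dma_addr_t *handle, gfp_t gfp)
        {
                struct dma_map_ops *ops = get_dma_ops(dev);

                /* .alloc replaces .alloc_coherent; NULL attrs means
                 * no special attributes, i.e. the old behaviour */
                return ops->alloc(dev, size, handle, gfp, NULL);
        }
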
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 3e57a00b8cba..ba3aeb4bc06a 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -206,40 +206,43 @@ reenable_mmu: /* re-enable mmu so we can */
andi. r10,r10,MSR_EE /* Did EE change? */
beq 1f
- /* Save handler and return address into the 2 unused words
- * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
- * else can be recovered from the pt_regs except r3 which for
- * normal interrupts has been set to pt_regs and for syscalls
- * is an argument, so we temporarily use ORIG_GPR3 to save it
- */
- stw r9,8(r1)
- stw r11,12(r1)
- stw r3,ORIG_GPR3(r1)
/*
* The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
* If from user mode there is only one stack frame on the stack, and
accessing CALLER_ADDR1 will cause an oops. So we need to create a dummy
* stack frame to make trace_hardirqs_off happy.
+ *
+ * This is handy because we also need to save a bunch of GPRs,
+ * r3 can be different from GPR3(r1) at this point, r9 and r11
+ * contain the old MSR and handler address respectively,
+ * r4 & r5 can contain page fault arguments that need to be passed
+ * along as well. r12, CCR, CTR, XER etc... are left clobbered as
+ * they aren't useful past this point (aren't syscall arguments),
+ * the rest is restored from the exception frame.
*/
+ stwu r1,-32(r1)
+ stw r9,8(r1)
+ stw r11,12(r1)
+ stw r3,16(r1)
+ stw r4,20(r1)
+ stw r5,24(r1)
andi. r12,r12,MSR_PR
- beq 11f
- stwu r1,-16(r1)
+ b 11f
bl trace_hardirqs_off
- addi r1,r1,16
b 12f
-
11:
bl trace_hardirqs_off
12:
+ lwz r5,24(r1)
+ lwz r4,20(r1)
+ lwz r3,16(r1)
+ lwz r11,12(r1)
+ lwz r9,8(r1)
+ addi r1,r1,32
lwz r0,GPR0(r1)
- lwz r3,ORIG_GPR3(r1)
- lwz r4,GPR4(r1)
- lwz r5,GPR5(r1)
lwz r6,GPR6(r1)
lwz r7,GPR7(r1)
lwz r8,GPR8(r1)
- lwz r9,8(r1)
- lwz r11,12(r1)
1: mtctr r11
mtlr r9
bctr /* jump to handler */
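
For reference, the 32-byte scratch frame built by the stwu above, rendered as a C struct (the struct and field names are illustrative only; the offsets match the stw instructions, with r9/r11 holding the old MSR and handler address per the comment):

        /* Illustrative layout of the frame created by "stwu r1,-32(r1)" */
        struct reenable_mmu_frame {
                unsigned long back_chain;   /*  0(r1): old r1, from stwu */
                unsigned long unused4;      /*  4(r1) */
                unsigned long saved_r9;     /*  8(r1): old MSR */
                unsigned long saved_r11;    /* 12(r1): handler address */
                unsigned long saved_r3;     /* 16(r1): syscall/intr arg */
                unsigned long saved_r4;     /* 20(r1): page fault arg */
                unsigned long saved_r5;     /* 24(r1): page fault arg */
                unsigned long unused28;     /* 28(r1) */
        };
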
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index f8a7a1a1a9f4..ed1718feb9d9 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -63,15 +63,9 @@ system_call_common:
std r0,GPR0(r1)
std r10,GPR1(r1)
ACCOUNT_CPU_USER_ENTRY(r10, r11)
- /*
- * This "crclr so" clears CR0.SO, which is the error indication on
- * return from this system call. There must be no cmp instruction
- * between it and the "mfcr r9" below, otherwise if XER.SO is set,
- * CR0.SO will get set, causing all system calls to appear to fail.
- */
- crclr so
std r2,GPR2(r1)
std r3,GPR3(r1)
+ mfcr r2
std r4,GPR4(r1)
std r5,GPR5(r1)
std r6,GPR6(r1)
@@ -82,18 +76,20 @@ system_call_common:
std r11,GPR10(r1)
std r11,GPR11(r1)
std r11,GPR12(r1)
+ std r11,_XER(r1)
+ std r11,_CTR(r1)
std r9,GPR13(r1)
- mfcr r9
mflr r10
+ /*
+ * This clears CR0.SO (bit 28), which is the error indication on
+ * return from this system call.
+ */
+ rldimi r2,r11,28,(63-28)
li r11,0xc01
- std r9,_CCR(r1)
std r10,_LINK(r1)
std r11,_TRAP(r1)
- mfxer r9
- mfctr r10
- std r9,_XER(r1)
- std r10,_CTR(r1)
std r3,ORIG_GPR3(r1)
+ std r2,_CCR(r1)
ld r2,PACATOC(r13)
addi r9,r1,STACK_FRAME_OVERHEAD
ld r11,exception_marker@toc(r2)
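
The rldimi replaces the removed crclr: with the CR image already in r2 (from the mfcr above) and r11 known to hold zero, it inserts a single zero bit at the CR0.SO position, avoiding a separate CR-modifying instruction in the hot path. What it computes, as a C sketch:

        /* Sketch of rldimi r2,r11,28,(63-28) with r11 == 0.  In the
         * 32-bit image read by mfcr, CR0 occupies bits 31..28 (LSB=0)
         * and SO, the syscall error flag, is bit 28. */
        static inline unsigned long clear_cr0_so(unsigned long cr_image)
        {
                return cr_image & ~(1UL << 28);
        }
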
@@ -154,7 +150,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
ld r10,TI_FLAGS(r11)
andi. r11,r10,_TIF_SYSCALL_T_OR_A
bne- syscall_dotrace
-syscall_dotrace_cont:
+.Lsyscall_dotrace_cont:
cmpldi 0,r0,NR_syscalls
bge- syscall_enosys
@@ -211,7 +207,7 @@ syscall_exit:
cmpld r3,r11
ld r5,_CCR(r1)
bge- syscall_error
-syscall_error_cont:
+.Lsyscall_error_cont:
ld r7,_NIP(r1)
BEGIN_FTR_SECTION
stdcx. r0,0,r1 /* to clear the reservation */
@@ -246,7 +242,7 @@ syscall_error:
oris r5,r5,0x1000 /* Set SO bit in CR */
neg r3,r3
std r5,_CCR(r1)
- b syscall_error_cont
+ b .Lsyscall_error_cont
/* Traced system call support */
syscall_dotrace:
@@ -268,7 +264,7 @@ syscall_dotrace:
addi r9,r1,STACK_FRAME_OVERHEAD
clrrdi r10,r1,THREAD_SHIFT
ld r10,TI_FLAGS(r10)
- b syscall_dotrace_cont
+ b .Lsyscall_dotrace_cont
syscall_enosys:
li r3,-ENOSYS
@@ -588,23 +584,19 @@ _GLOBAL(ret_from_except_lite)
fast_exc_return_irq:
restore:
/*
- * This is the main kernel exit path, we first check if we
- * have to change our interrupt state.
+ * This is the main kernel exit path. First we check if we
+ * are about to re-enable interrupts.
*/
ld r5,SOFTE(r1)
lbz r6,PACASOFTIRQEN(r13)
- cmpwi cr1,r5,0
- cmpw cr0,r5,r6
- beq cr0,4f
+ cmpwi cr0,r5,0
+ beq restore_irq_off
- /* We do, handle disable first, which is easy */
- bne cr1,3f;
- li r0,0
- stb r0,PACASOFTIRQEN(r13);
- TRACE_DISABLE_INTS
- b 4f
+ /* We are enabling, were we already enabled ? Yes, just return */
+ cmpwi cr0,r6,1
+ beq cr0,do_restore
-3: /*
+ /*
* We are about to soft-enable interrupts (we are hard disabled
* at this point). We check if there's anything that needs to
* be replayed first.
@@ -626,7 +618,7 @@ restore_no_replay:
/*
* Final return path. BookE is handled in a different file
*/
-4:
+do_restore:
#ifdef CONFIG_PPC_BOOK3E
b .exception_return_book3e
#else
@@ -700,6 +692,25 @@ fast_exception_return:
#endif /* CONFIG_PPC_BOOK3E */
/*
+ * We are returning to a context with interrupts soft disabled.
+ *
+ * However, we may also be about to hard enable, so we need to
+ * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
+ * or that bit can get out of sync and bad things will happen
+ */
+restore_irq_off:
+ ld r3,_MSR(r1)
+ lbz r7,PACAIRQHAPPENED(r13)
+ andi. r0,r3,MSR_EE
+ beq 1f
+ rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
+ stb r7,PACAIRQHAPPENED(r13)
+1: li r0,0
+ stb r0,PACASOFTIRQEN(r13);
+ TRACE_DISABLE_INTS
+ b do_restore
+
+ /*
* Something did happen, check if a re-emit is needed
* (this also clears paca->irq_happened)
*/
@@ -748,6 +759,9 @@ restore_check_irq_replay:
#endif /* CONFIG_PPC_BOOK3E */
1: b .ret_from_except /* What else to do here ? */
+
+
+3:
do_work:
#ifdef CONFIG_PREEMPT
andi. r0,r3,MSR_PR /* Returning to user mode? */
@@ -767,16 +781,6 @@ do_work:
SOFT_DISABLE_INTS(r3,r4)
1: bl .preempt_schedule_irq
- /* Hard-disable interrupts again (and update PACA) */
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 0
-#else
- ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
- mtmsrd r10,1
-#endif /* CONFIG_PPC_BOOK3E */
- li r0,PACA_IRQ_HARD_DIS
- stb r0,PACAIRQHAPPENED(r13)
-
/* Re-test flags and eventually loop */
clrrdi r9,r1,THREAD_SHIFT
ld r4,TI_FLAGS(r9)
@@ -787,14 +791,6 @@ do_work:
user_work:
#endif /* CONFIG_PREEMPT */
- /* Enable interrupts */
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 1
-#else
- ori r10,r10,MSR_EE
- mtmsrd r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-
andi. r0,r4,_TIF_NEED_RESCHED
beq 1f
bl .restore_interrupts
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index cb705fdbb458..f7bed44ee165 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -94,12 +94,10 @@ machine_check_pSeries_1:
data_access_pSeries:
HMT_MEDIUM
SET_SCRATCH0(r13)
-#ifndef CONFIG_POWER4_ONLY
BEGIN_FTR_SECTION
b data_access_check_stab
data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
-#endif
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
KVMTEST, 0x300)
@@ -301,7 +299,6 @@ machine_check_fwnmi:
EXC_STD, KVMTEST, 0x200)
KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
-#ifndef CONFIG_POWER4_ONLY
/* moved from 0x300 */
data_access_check_stab:
GET_PACA(r13)
@@ -328,7 +325,6 @@ do_stab_bolted_pSeries:
GET_SCRATCH0(r10)
std r10,PACA_EXSLB+EX_R13(r13)
EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
-#endif /* CONFIG_POWER4_ONLY */
KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
@@ -768,8 +764,8 @@ alignment_common:
std r3,_DAR(r1)
std r4,_DSISR(r1)
bl .save_nvgprs
+ DISABLE_INTS
addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
bl .alignment_exception
b .ret_from_except
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index cfe7a38708c3..18bdf74fa164 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -40,6 +40,8 @@
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/fadump.h>
+#include <asm/debug.h>
+#include <asm/setup.h>
static struct fw_dump fw_dump;
static struct fadump_mem_struct fdm;
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 7dd2981bcc50..22d608e8bb7d 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -778,14 +778,6 @@ _GLOBAL(__fixup_440A_mcheck)
blr
/*
- * extern void giveup_altivec(struct task_struct *prev)
- *
- * The 44x core does not have an AltiVec unit.
- */
-_GLOBAL(giveup_altivec)
- blr
-
-/*
* extern void giveup_fpu(struct task_struct *prev)
*
* The 44x core does not have an FPU.
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 28e62598d0e8..de80e0f9a2bd 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -874,14 +874,6 @@ _GLOBAL(__setup_e500mc_ivors)
sync
blr
-/*
- * extern void giveup_altivec(struct task_struct *prev)
- *
- * The e500 core does not have an AltiVec unit.
- */
-_GLOBAL(giveup_altivec)
- blr
-
#ifdef CONFIG_SPE
/*
* extern void giveup_spe(struct task_struct *prev)
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 79bb282e6501..b01d14eeca8d 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -65,7 +65,8 @@ static struct of_device_id __initdata ibmebus_matches[] = {
static void *ibmebus_alloc_coherent(struct device *dev,
size_t size,
dma_addr_t *dma_handle,
- gfp_t flag)
+ gfp_t flag,
+ struct dma_attrs *attrs)
{
void *mem;
@@ -77,7 +78,8 @@ static void *ibmebus_alloc_coherent(struct device *dev,
static void ibmebus_free_coherent(struct device *dev,
size_t size, void *vaddr,
- dma_addr_t dma_handle)
+ dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
kfree(vaddr);
}
@@ -136,8 +138,8 @@ static u64 ibmebus_dma_get_required_mask(struct device *dev)
}
static struct dma_map_ops ibmebus_dma_ops = {
- .alloc_coherent = ibmebus_alloc_coherent,
- .free_coherent = ibmebus_free_coherent,
+ .alloc = ibmebus_alloc_coherent,
+ .free = ibmebus_free_coherent,
.map_sg = ibmebus_map_sg,
.unmap_sg = ibmebus_unmap_sg,
.dma_supported = ibmebus_dma_supported,
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 6d2209ac0c44..2099d9a879e8 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -113,29 +113,6 @@ void cpu_idle(void)
}
}
-
-/*
- * cpu_idle_wait - Used to ensure that all the CPUs come out of the old
- * idle loop and start using the new idle loop.
- * Required while changing idle handler on SMP systems.
- * Caller must have changed idle handler to the new value before the call.
- * This window may be larger on shared systems.
- */
-void cpu_idle_wait(void)
-{
- int cpu;
- smp_mb();
-
- /* kick all the CPUs so that they exit out of old idle routine */
- get_online_cpus();
- for_each_online_cpu(cpu) {
- if (cpu != smp_processor_id())
- smp_send_reschedule(cpu);
- }
- put_online_cpus();
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
int powersave_nap;
#ifdef CONFIG_SYSCTL
diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c
deleted file mode 100644
index d076d465dbd1..000000000000
--- a/arch/powerpc/kernel/init_task.c
+++ /dev/null
@@ -1,29 +0,0 @@
-#include <linux/mm.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/init_task.h>
-#include <linux/fs.h>
-#include <linux/mqueue.h>
-#include <asm/uaccess.h>
-
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-/*
- * Initial thread structure.
- *
- * We need to make sure that this is 16384-byte aligned due to the
- * way process stacks are handled. This is done by having a special
- * "init_task" linker map entry..
- */
-union thread_union init_thread_union __init_task_data =
- { INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 243dbabfe74d..7835a5e1ea5f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -229,6 +229,19 @@ notrace void arch_local_irq_restore(unsigned long en)
*/
if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
__hard_irq_disable();
+#ifdef CONFIG_TRACE_IRQFLAGS
+ else {
+ /*
+ * We should already be hard disabled here. We had bugs
+ * where that wasn't the case so let's double check it and
+ * warn if we are wrong. Only do that when IRQ tracing
+ * is enabled as mfmsr() can be costly.
+ */
+ if (WARN_ON(mfmsr() & MSR_EE))
+ __hard_irq_disable();
+ }
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
set_soft_enabled(0);
/*
@@ -260,11 +273,17 @@ EXPORT_SYMBOL(arch_local_irq_restore);
* if they are currently disabled. This is typically called before
* schedule() or do_signal() when returning to userspace. We do it
* in C to avoid the burden of dealing with lockdep etc...
+ *
+ * NOTE: This is called with interrupts hard disabled but not marked
+ * as such in paca->irq_happened, so we need to resync this.
*/
void restore_interrupts(void)
{
- if (irqs_disabled())
+ if (irqs_disabled()) {
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
local_irq_enable();
+ } else
+ __hard_irq_enable();
}
#endif /* CONFIG_PPC64 */
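
restore_interrupts() is invoked from the do_work/user_work path in entry_64.S above, where the asm no longer hard-enables by hand. The same logic annotated, under the entry conditions stated in the NOTE:

        /* Assumed state on entry: MSR:EE was cleared by the exception
         * entry, but paca->irq_happened does not record it. */
        static void example_return_to_user_prep(void)
        {
                if (irqs_disabled()) {
                        /* record the hard disable first, otherwise the
                         * restore path may believe EE is already on and
                         * never hard-enable it again */
                        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
                        local_irq_enable();
                } else {
                        /* lazy state says enabled: just flip EE back on */
                        __hard_irq_enable();
                }
        }
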
@@ -330,14 +349,10 @@ void migrate_irqs(void)
alloc_cpumask_var(&mask, GFP_KERNEL);
- for_each_irq(irq) {
+ for_each_irq_desc(irq, desc) {
struct irq_data *data;
struct irq_chip *chip;
- desc = irq_to_desc(irq);
- if (!desc)
- continue;
-
data = irq_desc_get_irq_data(desc);
if (irqd_is_per_cpu(data))
continue;
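
This hunk and the machine_kexec.c one below are the same migration: for_each_irq() iterates raw interrupt numbers and forces a NULL check on each irq_to_desc() lookup, while for_each_irq_desc() only visits allocated descriptors. The resulting idiom, sketched (example_walk() is hypothetical):

        #include <linux/irq.h>

        static void example_walk(void)
        {
                unsigned int irq;
                struct irq_desc *desc;

                /* visits only allocated descriptors, so the old
                 * "if (!desc) continue;" test disappears */
                for_each_irq_desc(irq, desc) {
                        struct irq_chip *chip = irq_desc_get_chip(desc);

                        if (!chip)
                                continue;
                        /* ... act on irq / desc / chip ... */
                }
        }
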
@@ -560,12 +575,6 @@ void do_softirq(void)
local_irq_restore(flags);
}
-irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
-{
- return d->hwirq;
-}
-EXPORT_SYMBOL_GPL(irqd_to_hwirq);
-
irq_hw_number_t virq_to_hw(unsigned int virq)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
@@ -578,7 +587,7 @@ int irq_choose_cpu(const struct cpumask *mask)
{
int cpuid;
- if (cpumask_equal(mask, cpu_all_mask)) {
+ if (cpumask_equal(mask, cpu_online_mask)) {
static int irq_rover;
static DEFINE_RAW_SPINLOCK(irq_rover_lock);
unsigned long flags;
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 76a6e40a6f7c..782bd0a3c2f0 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -24,6 +24,7 @@
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/machdep.h>
+#include <asm/debug.h>
/*
* This table contains the mapping between PowerPC hardware trap types, and
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index c957b1202bdc..5df777794403 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -23,14 +23,11 @@
void machine_kexec_mask_interrupts(void) {
unsigned int i;
+ struct irq_desc *desc;
- for_each_irq(i) {
- struct irq_desc *desc = irq_to_desc(i);
+ for_each_irq_desc(i, desc) {
struct irq_chip *chip;
- if (!desc)
- continue;
-
chip = irq_desc_get_chip(desc);
if (!chip)
continue;
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 7cd07b42ca1a..386d57f66f28 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -738,8 +738,23 @@ relocate_new_kernel:
mr r5, r31
li r0, 0
-#elif defined(CONFIG_44x) && !defined(CONFIG_PPC_47x)
+#elif defined(CONFIG_44x)
+ /* Save our parameters */
+ mr r29, r3
+ mr r30, r4
+ mr r31, r5
+
+#ifdef CONFIG_PPC_47x
+ /* Check for 47x cores */
+ mfspr r3,SPRN_PVR
+ srwi r3,r3,16
+ cmplwi cr0,r3,PVR_476@h
+ beq setup_map_47x
+ cmplwi cr0,r3,PVR_476_ISS@h
+ beq setup_map_47x
+#endif /* CONFIG_PPC_47x */
+
/*
* Code for setting up 1:1 mapping for PPC440x for KEXEC
*
@@ -753,16 +768,15 @@ relocate_new_kernel:
* 5) Invalidate the tmp mapping.
*
* - Based on the kexec support code for FSL BookE
- * - Doesn't support 47x yet.
*
*/
- /* Save our parameters */
- mr r29, r3
- mr r30, r4
- mr r31, r5
- /* Load our MSR_IS and TID to MMUCR for TLB search */
- mfspr r3,SPRN_PID
+ /*
+ * Load the PID with kernel PID (0).
+ * Also load our MSR_IS and TID to MMUCR for TLB search.
+ */
+ li r3, 0
+ mtspr SPRN_PID, r3
mfmsr r4
andi. r4,r4,MSR_IS@l
beq wmmucr
@@ -900,6 +914,179 @@ next_tlb:
li r3, 0
tlbwe r3, r24, PPC44x_TLB_PAGEID
sync
+ b ppc44x_map_done
+
+#ifdef CONFIG_PPC_47x
+
+ /* 1:1 mapping for 47x */
+
+setup_map_47x:
+
+ /*
+ * Load the kernel pid (0) to PID and also to MMUCR[TID].
+ * Also set the MSR IS->MMUCR STS
+ */
+ li r3, 0
+ mtspr SPRN_PID, r3 /* Set PID */
+ mfmsr r4 /* Get MSR */
+ andi. r4, r4, MSR_IS@l /* TS=1? */
+ beq 1f /* If not, leave STS=0 */
+ oris r3, r3, PPC47x_MMUCR_STS@h /* Set STS=1 */
+1: mtspr SPRN_MMUCR, r3 /* Put MMUCR */
+ sync
+
+ /* Find the entry we are running from */
+ bl 2f
+2: mflr r23
+ tlbsx r23, 0, r23
+ tlbre r24, r23, 0 /* TLB Word 0 */
+ tlbre r25, r23, 1 /* TLB Word 1 */
+ tlbre r26, r23, 2 /* TLB Word 2 */
+
+
+ /*
+ * Invalidates all the TLB entries by writing to 256 RPNs (r4)
+ * of 4k page size in all 4 ways (0-3 in r3).
+ * This would invalidate the entire UTLB including the one we are
+ * running from. However the shadow TLB entries would help us
+ * to continue the execution, until we flush them (rfi/isync).
+ */
+ addis r3, 0, 0x8000 /* specify the way */
+ addi r4, 0, 0 /* TLB Word0 = (EPN=0, VALID = 0) */
+ addi r5, 0, 0
+ b clear_utlb_entry
+
+ /* Align the loop to speed things up. from head_44x.S */
+ .align 6
+
+clear_utlb_entry:
+
+ tlbwe r4, r3, 0
+ tlbwe r5, r3, 1
+ tlbwe r5, r3, 2
+ addis r3, r3, 0x2000 /* Increment the way */
+ cmpwi r3, 0
+ bne clear_utlb_entry
+ addis r3, 0, 0x8000
+ addis r4, r4, 0x100 /* Increment the EPN */
+ cmpwi r4, 0
+ bne clear_utlb_entry
+
+ /* Create the entries in the other address space */
+ mfmsr r5
+ rlwinm r7, r5, 27, 31, 31 /* Get the TS (Bit 26) from MSR */
+ xori r7, r7, 1 /* r7 = !TS */
+
+ insrwi r24, r7, 1, 21 /* Change the TS in the saved TLB word 0 */
+
+ /*
+ * write out the TLB entries for the tmp mapping
+ * Use way '0' so that we could easily invalidate it later.
+ */
+ lis r3, 0x8000 /* Way '0' */
+
+ tlbwe r24, r3, 0
+ tlbwe r25, r3, 1
+ tlbwe r26, r3, 2
+
+ /* Update the msr to the new TS */
+ insrwi r5, r7, 1, 26
+
+ bl 1f
+1: mflr r6
+ addi r6, r6, (2f-1b)
+
+ mtspr SPRN_SRR0, r6
+ mtspr SPRN_SRR1, r5
+ rfi
+
+ /*
+ * Now we are in the tmp address space.
+ * Create a 1:1 mapping for 0-2GiB in the original TS.
+ */
+2:
+ li r3, 0
+ li r4, 0 /* TLB Word 0 */
+ li r5, 0 /* TLB Word 1 */
+ li r6, 0
+ ori r6, r6, PPC47x_TLB2_S_RWX /* TLB word 2 */
+
+ li r8, 0 /* PageIndex */
+
+ xori r7, r7, 1 /* revert back to original TS */
+
+write_utlb:
+ rotlwi r5, r8, 28 /* RPN = PageIndex * 256M */
+ /* ERPN = 0 as we don't use memory above 2G */
+
+ mr r4, r5 /* EPN = RPN */
+ ori r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
+ insrwi r4, r7, 1, 21 /* Insert the TS to Word 0 */
+
+ tlbwe r4, r3, 0 /* Write out the entries */
+ tlbwe r5, r3, 1
+ tlbwe r6, r3, 2
+ addi r8, r8, 1
+ cmpwi r8, 8 /* Have we completed ? */
+ bne write_utlb
+
+ /* make sure we complete the TLB write up */
+ isync
+
+ /*
+ * Prepare to jump to the 1:1 mapping.
+ * 1) Extract page size of the tmp mapping
+ * DSIZ = TLB_Word0[22:27]
+ * 2) Calculate the physical address of the address
+ * to jump to.
+ */
+ rlwinm r10, r24, 0, 22, 27
+
+ cmpwi r10, PPC47x_TLB0_4K
+ bne 0f
+ li r10, 0x1000 /* r10 = 4k */
+ bl 1f
+
+0:
+ /* Defaults to 256M */
+ lis r10, 0x1000
+
+ bl 1f
+1: mflr r4
+ addi r4, r4, (2f-1b) /* virtual address of 2f */
+
+ subi r11, r10, 1 /* offsetmask = Pagesize - 1 */
+ not r10, r11 /* Pagemask = ~(offsetmask) */
+
+ and r5, r25, r10 /* Physical page */
+ and r6, r4, r11 /* offset within the current page */
+
+ or r5, r5, r6 /* Physical address for 2f */
+
+ /* Switch the TS in MSR to the original one */
+ mfmsr r8
+ insrwi r8, r7, 1, 26
+
+ mtspr SPRN_SRR1, r8
+ mtspr SPRN_SRR0, r5
+ rfi
+
+2:
+ /* Invalidate the tmp mapping */
+ lis r3, 0x8000 /* Way '0' */
+
+ clrrwi r24, r24, 12 /* Clear the valid bit */
+ tlbwe r24, r3, 0
+ tlbwe r25, r3, 1
+ tlbwe r26, r3, 2
+
+ /* Make sure we complete the TLB write and flush the shadow TLB */
+ isync
+
+#endif
+
+ppc44x_map_done:
+
/* Restore the parameters */
mr r3, r29
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 0bb1f98613ba..fbe1a12dc7f1 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -36,10 +36,7 @@ struct lppaca lppaca[] = {
[0 ... (NR_LPPACAS-1)] = {
.desc = 0xd397d781, /* "LpPa" */
.size = sizeof(struct lppaca),
- .dyn_proc_status = 2,
- .decr_val = 0x00ff0000,
.fpregs_in_use = 1,
- .end_of_quantum = 0xfffffffffffffffful,
.slb_count = 64,
.vmxregs_in_use = 0,
.page_ins = 0,
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index f88698c0f332..710f400476de 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -124,7 +124,7 @@ void enable_kernel_altivec(void)
if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
giveup_altivec(current);
else
- giveup_altivec(NULL); /* just enable AltiVec for kernel - force */
+ giveup_altivec_notask();
#else
giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
@@ -711,18 +711,21 @@ release_thread(struct task_struct *t)
}
/*
- * This gets called before we allocate a new thread and copy
- * the current task into it.
+ * This gets called so that we can store coprocessor state into memory and
+ * copy the current task into the new thread.
*/
-void prepare_to_copy(struct task_struct *tsk)
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- flush_fp_to_thread(current);
- flush_altivec_to_thread(current);
- flush_vsx_to_thread(current);
- flush_spe_to_thread(current);
+ flush_fp_to_thread(src);
+ flush_altivec_to_thread(src);
+ flush_vsx_to_thread(src);
+ flush_spe_to_thread(src);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
- flush_ptrace_hw_breakpoint(tsk);
+ flush_ptrace_hw_breakpoint(src);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
+ *dst = *src;
+ return 0;
}
/*
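
prepare_to_copy() ran before the task copy with no way to report failure; arch_dup_task_struct() now does both the register flush and the copy, and can return an error. A simplified sketch of the generic caller in kernel/fork.c (from memory of the core code of this era, details elided):

        /* Simplified sketch of the kernel/fork.c side */
        static struct task_struct *dup_task_struct_sketch(struct task_struct *orig)
        {
                struct task_struct *tsk = alloc_task_struct_node(NUMA_NO_NODE);

                if (!tsk)
                        return NULL;

                /* the arch hook flushes FP/VMX/VSX/SPE state into
                 * orig->thread, then performs "*tsk = *orig" itself */
                if (arch_dup_task_struct(tsk, orig)) {
                        free_task_struct(tsk);
                        return NULL;
                }
                return tsk;
        }
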
@@ -1235,7 +1238,7 @@ void __ppc64_runlatch_on(void)
ctrl |= CTRL_RUNLATCH;
mtspr(SPRN_CTRLT, ctrl);
- ti->local_flags |= TLF_RUNLATCH;
+ ti->local_flags |= _TLF_RUNLATCH;
}
/* Called with hard IRQs off */
@@ -1244,7 +1247,7 @@ void __ppc64_runlatch_off(void)
struct thread_info *ti = current_thread_info();
unsigned long ctrl;
- ti->local_flags &= ~TLF_RUNLATCH;
+ ti->local_flags &= ~_TLF_RUNLATCH;
ctrl = mfspr(SPRN_CTRLF);
ctrl &= ~CTRL_RUNLATCH;
@@ -1252,37 +1255,6 @@ void __ppc64_runlatch_off(void)
}
#endif /* CONFIG_PPC64 */
-#if THREAD_SHIFT < PAGE_SHIFT
-
-static struct kmem_cache *thread_info_cache;
-
-struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
-{
- struct thread_info *ti;
-
- ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
- if (unlikely(ti == NULL))
- return NULL;
-#ifdef CONFIG_DEBUG_STACK_USAGE
- memset(ti, 0, THREAD_SIZE);
-#endif
- return ti;
-}
-
-void free_thread_info(struct thread_info *ti)
-{
- kmem_cache_free(thread_info_cache, ti);
-}
-
-void thread_info_cache_init(void)
-{
- thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
- THREAD_SIZE, 0, NULL);
- BUG_ON(thread_info_cache == NULL);
-}
-
-#endif /* THREAD_SHIFT < PAGE_SHIFT */
-
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 99860273211b..1b488e5305c5 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -680,6 +680,9 @@ static void __init early_cmdline_parse(void)
#define OV3_VMX 0x40 /* VMX/Altivec */
#define OV3_DFP 0x20 /* decimal FP */
+/* Option vector 4: IBM PAPR implementation */
+#define OV4_MIN_ENT_CAP 0x01 /* minimum VP entitled capacity */
+
/* Option vector 5: PAPR/OF options supported */
#define OV5_LPAR 0x80 /* logical partitioning supported */
#define OV5_SPLPAR 0x40 /* shared-processor LPAR supported */
@@ -701,6 +704,8 @@ static void __init early_cmdline_parse(void)
#define OV5_XCMO 0x00
#endif
#define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */
+#define OV5_PFO_HW_RNG 0x80 /* PFO Random Number Generator */
+#define OV5_PFO_HW_ENCR 0x20 /* PFO Encryption Accelerator */
/* Option Vector 6: IBM PAPR hints */
#define OV6_LINUX 0x02 /* Linux is our OS */
@@ -744,11 +749,12 @@ static unsigned char ibm_architecture_vec[] = {
OV3_FP | OV3_VMX | OV3_DFP,
/* option vector 4: IBM PAPR implementation */
- 2 - 2, /* length */
+ 3 - 2, /* length */
0, /* don't halt */
+ OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
/* option vector 5: PAPR/OF options */
- 13 - 2, /* length */
+ 18 - 2, /* length */
0, /* don't ignore, don't halt */
OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY |
OV5_DONATE_DEDICATE_CPU | OV5_MSI,
@@ -762,8 +768,13 @@ static unsigned char ibm_architecture_vec[] = {
* must match by the macro below. Update the definition if
* the structure layout changes.
*/
-#define IBM_ARCH_VEC_NRCORES_OFFSET 100
+#define IBM_ARCH_VEC_NRCORES_OFFSET 101
W(NR_CPUS), /* number of cores supported */
+ 0,
+ 0,
+ 0,
+ 0,
+ OV5_PFO_HW_RNG | OV5_PFO_HW_ENCR,
/* option vector 6: IBM PAPR hints */
4 - 2, /* length */
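
Each option vector starts with a length byte holding (number of payload bytes - 1), written in this file as total-size-minus-2. Vector 4 gains one payload byte (OV4_MIN_ENT_CAP), so every later offset shifts by one, which is why IBM_ARCH_VEC_NRCORES_OFFSET moves from 100 to 101; vector 5 grows to make room for the PFO flags. A standalone self-check of that arithmetic (illustrative only):

        #include <assert.h>

        int main(void)
        {
                int ov4_payload_old = 1;        /* just "don't halt" */
                int ov4_payload_new = 2;        /* plus OV4_MIN_ENT_CAP */

                /* length byte == payload bytes - 1, the "N - 2" idiom */
                assert(ov4_payload_old - 1 == 2 - 2);
                assert(ov4_payload_new - 1 == 3 - 2);

                /* one extra byte in vector 4 shifts the NR_CPUS word */
                assert(100 + (ov4_payload_new - ov4_payload_old) == 101);
                return 0;
        }
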
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 8d8e028893be..c10fc28b9092 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1432,40 +1432,6 @@ static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
#endif
}
-/*
- * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
- * we mark them as obsolete now, they will be removed in a future version
- */
-static long arch_ptrace_old(struct task_struct *child, long request,
- unsigned long addr, unsigned long data)
-{
- void __user *datavp = (void __user *) data;
-
- switch (request) {
- case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
- return copy_regset_to_user(child, &user_ppc_native_view,
- REGSET_GPR, 0, 32 * sizeof(long),
- datavp);
-
- case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
- return copy_regset_from_user(child, &user_ppc_native_view,
- REGSET_GPR, 0, 32 * sizeof(long),
- datavp);
-
- case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
- return copy_regset_to_user(child, &user_ppc_native_view,
- REGSET_FPR, 0, 32 * sizeof(double),
- datavp);
-
- case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
- return copy_regset_from_user(child, &user_ppc_native_view,
- REGSET_FPR, 0, 32 * sizeof(double),
- datavp);
- }
-
- return -EPERM;
-}
-
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
@@ -1687,14 +1653,6 @@ long arch_ptrace(struct task_struct *child, long request,
datavp);
#endif
- /* Old reverse args ptrace callss */
- case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
- case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
- case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
- case PPC_PTRACE_SETFPREGS: /* Get FPRs 0 - 31. */
- ret = arch_ptrace_old(child, request, addr, data);
- break;
-
default:
ret = ptrace_request(child, request, addr, data);
break;
@@ -1710,7 +1668,7 @@ long do_syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
- secure_computing(regs->gpr[0]);
+ secure_computing_strict(regs->gpr[0]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 469349d14a97..8c21658719d9 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -39,30 +39,6 @@
* in exit.c or in signal.c.
*/
-/*
- * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
- * we mark them as obsolete now, they will be removed in a future version
- */
-static long compat_ptrace_old(struct task_struct *child, long request,
- long addr, long data)
-{
- switch (request) {
- case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
- return copy_regset_to_user(child,
- task_user_regset_view(current), 0,
- 0, 32 * sizeof(compat_long_t),
- compat_ptr(data));
-
- case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
- return copy_regset_from_user(child,
- task_user_regset_view(current), 0,
- 0, 32 * sizeof(compat_long_t),
- compat_ptr(data));
- }
-
- return -EPERM;
-}
-
/* Macros to workout the correct index for the FPR in the thread struct */
#define FPRNUMBER(i) (((i) - PT_FPR0) >> 1)
#define FPRHALF(i) (((i) - PT_FPR0) & 1)
@@ -308,8 +284,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
case PTRACE_SETVSRREGS:
case PTRACE_GETREGS64:
case PTRACE_SETREGS64:
- case PPC_PTRACE_GETFPREGS:
- case PPC_PTRACE_SETFPREGS:
case PTRACE_KILL:
case PTRACE_SINGLESTEP:
case PTRACE_DETACH:
@@ -322,12 +296,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
ret = arch_ptrace(child, request, addr, data);
break;
- /* Old reverse args ptrace callss */
- case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
- case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
- ret = compat_ptrace_old(child, request, addr, data);
- break;
-
default:
ret = compat_ptrace_request(child, request, addr, data);
break;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 9825f29d1faf..ec8a53fa9e8f 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -150,6 +150,9 @@ notrace void __init machine_init(u64 dt_ptr)
}
#ifdef CONFIG_BOOKE_WDT
+extern u32 booke_wdt_enabled;
+extern u32 booke_wdt_period;
+
/* Checks wdt=x and wdt_period=xx command-line option */
notrace int __init early_parse_wdt(char *p)
{
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d9f94410fd7f..e4cb34322de4 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,27 +57,9 @@
#define DBG(fmt...)
#endif
-
-/* Store all idle threads, this can be reused instead of creating
-* a new thread. Also avoids complicated thread destroy functionality
-* for idle threads.
-*/
#ifdef CONFIG_HOTPLUG_CPU
-/*
- * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
- * removed after init for !CONFIG_HOTPLUG_CPU.
- */
-static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
-#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
-#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
-
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
-
-#else
-static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
-#define get_idle_for_cpu(x) (idle_thread_array[(x)])
-#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
#endif
struct thread_info *secondary_ti;
@@ -429,60 +411,19 @@ int generic_check_cpu_restart(unsigned int cpu)
}
#endif
-struct create_idle {
- struct work_struct work;
- struct task_struct *idle;
- struct completion done;
- int cpu;
-};
-
-static void __cpuinit do_fork_idle(struct work_struct *work)
+static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
- struct create_idle *c_idle =
- container_of(work, struct create_idle, work);
-
- c_idle->idle = fork_idle(c_idle->cpu);
- complete(&c_idle->done);
-}
-
-static int __cpuinit create_idle(unsigned int cpu)
-{
- struct thread_info *ti;
- struct create_idle c_idle = {
- .cpu = cpu,
- .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
- };
- INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
-
- c_idle.idle = get_idle_for_cpu(cpu);
-
- /* We can't use kernel_thread since we must avoid to
- * reschedule the child. We use a workqueue because
- * we want to fork from a kernel thread, not whatever
- * userspace process happens to be trying to online us.
- */
- if (!c_idle.idle) {
- schedule_work(&c_idle.work);
- wait_for_completion(&c_idle.done);
- } else
- init_idle(c_idle.idle, cpu);
- if (IS_ERR(c_idle.idle)) {
- pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
- return PTR_ERR(c_idle.idle);
- }
- ti = task_thread_info(c_idle.idle);
+ struct thread_info *ti = task_thread_info(idle);
#ifdef CONFIG_PPC64
- paca[cpu].__current = c_idle.idle;
+ paca[cpu].__current = idle;
paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
ti->cpu = cpu;
- current_set[cpu] = ti;
-
- return 0;
+ secondary_ti = current_set[cpu] = ti;
}
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc, c;
@@ -490,12 +431,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
(smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
return -EINVAL;
- /* Make sure we have an idle thread */
- rc = create_idle(cpu);
- if (rc)
- return rc;
-
- secondary_ti = current_set[cpu];
+ cpu_idle_thread_init(cpu, tidle);
/* Make sure callin-map entry is 0 (can be a leftover from a CPU
* hotplug
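
This is powerpc's side of the cross-architecture change that moved idle-thread creation into common code: __cpu_up() now receives the idle task instead of forking it through the create_idle() workqueue dance removed above. Roughly how the generic caller looks with the common smpboot infrastructure (a sketch, not the exact core code):

        /* Sketch of the generic side (kernel/cpu.c + kernel/smpboot.c) */
        static int example_cpu_up(unsigned int cpu)
        {
                /* core code reuses or forks the per-cpu idle task... */
                struct task_struct *idle = idle_thread_get(cpu);

                if (IS_ERR(idle))
                        return PTR_ERR(idle);

                /* ...and hands it to the architecture */
                return __cpu_up(cpu, idle);
        }
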
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 6aa0c663e247..158972341a2d 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -248,7 +248,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
addr, regs->nip, regs->link, code);
}
- if (!arch_irq_disabled_regs(regs))
+ if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
local_irq_enable();
memset(&info, 0, sizeof(info));
@@ -1019,7 +1019,9 @@ void __kprobes program_check_exception(struct pt_regs *regs)
return;
}
- local_irq_enable();
+ /* We restore the interrupt state now */
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
#ifdef CONFIG_MATH_EMULATION
/* (reason & REASON_ILLEGAL) would be the obvious thing here,
@@ -1069,6 +1071,10 @@ void alignment_exception(struct pt_regs *regs)
{
int sig, code, fixed = 0;
+ /* We restore the interrupt state now */
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+
/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
fixed = fix_alignment(regs);
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 4d5a3edff49e..e830289d2e48 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -89,6 +89,16 @@ _GLOBAL(load_up_altivec)
/* restore registers and return */
blr
+_GLOBAL(giveup_altivec_notask)
+ mfmsr r3
+ andis. r4,r3,MSR_VEC@h
+ bnelr /* Already enabled? */
+ oris r3,r3,MSR_VEC@h
+ SYNC
+ MTMSRD(r3) /* enable use of VMX now */
+ isync
+ blr
+
/*
* giveup_altivec(tsk)
* Disable VMX for the task given as the argument,
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index b2f7c8480bf6..cb87301ccd55 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -14,7 +14,9 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/cpu.h>
#include <linux/types.h>
+#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -482,7 +484,8 @@ static void vio_cmo_balance(struct work_struct *work)
}
static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
void *ret;
@@ -492,7 +495,7 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
return NULL;
}
- ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
+ ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
if (unlikely(ret == NULL)) {
vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
atomic_inc(&viodev->cmo.allocs_failed);
@@ -502,11 +505,12 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
}
static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
- dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);
+ dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);
vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}
@@ -607,8 +611,8 @@ static u64 vio_dma_get_required_mask(struct device *dev)
}
struct dma_map_ops vio_dma_mapping_ops = {
- .alloc_coherent = vio_dma_iommu_alloc_coherent,
- .free_coherent = vio_dma_iommu_free_coherent,
+ .alloc = vio_dma_iommu_alloc_coherent,
+ .free = vio_dma_iommu_free_coherent,
.map_sg = vio_dma_iommu_map_sg,
.unmap_sg = vio_dma_iommu_unmap_sg,
.map_page = vio_dma_iommu_map_page,
@@ -707,13 +711,26 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
struct vio_driver *viodrv = to_vio_driver(dev->driver);
unsigned long flags;
size_t size;
+ bool dma_capable = false;
+
+ /* A device requires entitlement if it has a DMA window property */
+ switch (viodev->family) {
+ case VDEVICE:
+ if (of_get_property(viodev->dev.of_node,
+ "ibm,my-dma-window", NULL))
+ dma_capable = true;
+ break;
+ case PFO:
+ dma_capable = false;
+ break;
+ default:
+ dev_warn(dev, "unknown device family: %d\n", viodev->family);
+ BUG();
+ break;
+ }
- /*
- * Check to see that device has a DMA window and configure
- * entitlement for the device.
- */
- if (of_get_property(viodev->dev.of_node,
- "ibm,my-dma-window", NULL)) {
+ /* Configure entitlement for the device. */
+ if (dma_capable) {
/* Check that the driver is CMO enabled and get desired DMA */
if (!viodrv->get_desired_dma) {
dev_err(dev, "%s: device driver does not support CMO\n",
@@ -1048,6 +1065,94 @@ static void vio_cmo_sysfs_init(void) { }
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
+
+/*
+ * Platform Facilities Option (PFO) support
+ */
+
+/**
+ * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
+ *
+ * @vdev - Pointer to a struct vio_dev for device
+ * @op - Pointer to a struct vio_pfo_op for the operation parameters
+ *
+ * Calls the hypervisor to synchronously perform the PFO operation
+ * described in @op. In the case of a busy response from the hypervisor,
+ * the operation will be re-submitted indefinitely unless a non-zero timeout
+ * is specified or an error occurs. The timeout places a limit on when to
+ * stop re-submitting a operation, the total time can be exceeded if an
+ * operation is in progress.
+ *
+ * If op->hcall_ret is not NULL, this will be set to the return from the
+ * last h_cop_op call or it will be 0 if an error not involving the h_call
+ * was encountered.
+ *
+ * Returns:
+ * 0 on success,
+ * -EINVAL if the h_call fails due to an invalid parameter,
+ * -E2BIG if the h_call can not be performed synchronously,
+ * -EBUSY if a timeout is specified and has elapsed,
+ * -EACCES if the memory area for data/status has been rescinded, or
+ * -EPERM if a hardware fault has been indicated
+ */
+int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
+{
+ struct device *dev = &vdev->dev;
+ unsigned long deadline = 0;
+ long hret = 0;
+ int ret = 0;
+
+ if (op->timeout)
+ deadline = jiffies + msecs_to_jiffies(op->timeout);
+
+ while (true) {
+ hret = plpar_hcall_norets(H_COP, op->flags,
+ vdev->resource_id,
+ op->in, op->inlen, op->out,
+ op->outlen, op->csbcpb);
+
+ if (hret == H_SUCCESS ||
+ (hret != H_NOT_ENOUGH_RESOURCES &&
+ hret != H_BUSY && hret != H_RESOURCE) ||
+ (op->timeout && time_after(jiffies, deadline)))
+ break;
+
+ dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
+ }
+
+ switch (hret) {
+ case H_SUCCESS:
+ ret = 0;
+ break;
+ case H_OP_MODE:
+ case H_TOO_BIG:
+ ret = -E2BIG;
+ break;
+ case H_RESCINDED:
+ ret = -EACCES;
+ break;
+ case H_HARDWARE:
+ ret = -EPERM;
+ break;
+ case H_NOT_ENOUGH_RESOURCES:
+ case H_RESOURCE:
+ case H_BUSY:
+ ret = -EBUSY;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
+ __func__, ret, hret);
+
+ op->hcall_err = hret;
+ return ret;
+}
+EXPORT_SYMBOL(vio_h_cop_sync);
+
static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
const unsigned char *dma_window;
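
A hypothetical caller of vio_h_cop_sync(), using only the struct vio_pfo_op fields referenced by the function above; the flag value, timeout and buffer addresses are illustrative (the real users are the PFO accelerator drivers):

        /* Illustrative synchronous PFO request */
        static int example_pfo_request(struct vio_dev *vdev,
                                       u64 in_pa, u64 inlen,
                                       u64 out_pa, u64 outlen, u64 csbcpb_pa)
        {
                struct vio_pfo_op op = {
                        .flags   = 0,           /* illustrative */
                        .in      = in_pa,
                        .inlen   = inlen,
                        .out     = out_pa,
                        .outlen  = outlen,
                        .csbcpb  = csbcpb_pa,
                        .timeout = 1000,        /* ms; 0 retries forever */
                };
                int rc = vio_h_cop_sync(vdev, &op);

                if (rc)
                        pr_err("H_COP failed: rc=%d hcall_err=%ld\n",
                               rc, op.hcall_err);
                return rc;
        }
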
@@ -1209,35 +1314,87 @@ static void __devinit vio_dev_release(struct device *dev)
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
struct vio_dev *viodev;
+ struct device_node *parent_node;
const unsigned int *unit_address;
+ const unsigned int *pfo_resid = NULL;
+ enum vio_dev_family family;
+ const char *of_node_name = of_node->name ? of_node->name : "<unknown>";
- /* we need the 'device_type' property, in order to match with drivers */
- if (of_node->type == NULL) {
- printk(KERN_WARNING "%s: node %s missing 'device_type'\n",
- __func__,
- of_node->name ? of_node->name : "<unknown>");
+ /*
+ * Determine if this node is a under the /vdevice node or under the
+ * /ibm,platform-facilities node. This decides the device's family.
+ */
+ parent_node = of_get_parent(of_node);
+ if (parent_node) {
+ if (!strcmp(parent_node->full_name, "/ibm,platform-facilities"))
+ family = PFO;
+ else if (!strcmp(parent_node->full_name, "/vdevice"))
+ family = VDEVICE;
+ else {
+ pr_warn("%s: parent(%s) of %s not recognized.\n",
+ __func__,
+ parent_node->full_name,
+ of_node_name);
+ of_node_put(parent_node);
+ return NULL;
+ }
+ of_node_put(parent_node);
+ } else {
+ pr_warn("%s: could not determine the parent of node %s.\n",
+ __func__, of_node_name);
return NULL;
}
- unit_address = of_get_property(of_node, "reg", NULL);
- if (unit_address == NULL) {
- printk(KERN_WARNING "%s: node %s missing 'reg'\n",
- __func__,
- of_node->name ? of_node->name : "<unknown>");
- return NULL;
+ if (family == PFO) {
+ if (of_get_property(of_node, "interrupt-controller", NULL)) {
+ pr_debug("%s: Skipping the interrupt controller %s.\n",
+ __func__, of_node_name);
+ return NULL;
+ }
}
/* allocate a vio_dev for this node */
viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
- if (viodev == NULL)
+ if (viodev == NULL) {
+ pr_warn("%s: allocation failure for VIO device.\n", __func__);
return NULL;
+ }
- viodev->irq = irq_of_parse_and_map(of_node, 0);
+ /* we need the 'device_type' property, in order to match with drivers */
+ viodev->family = family;
+ if (viodev->family == VDEVICE) {
+ if (of_node->type != NULL)
+ viodev->type = of_node->type;
+ else {
+ pr_warn("%s: node %s is missing the 'device_type' "
+ "property.\n", __func__, of_node_name);
+ goto out;
+ }
+
+ unit_address = of_get_property(of_node, "reg", NULL);
+ if (unit_address == NULL) {
+ pr_warn("%s: node %s missing 'reg'\n",
+ __func__, of_node_name);
+ goto out;
+ }
+ dev_set_name(&viodev->dev, "%x", *unit_address);
+ viodev->irq = irq_of_parse_and_map(of_node, 0);
+ viodev->unit_address = *unit_address;
+ } else {
+ /* PFO devices need their resource_id for submitting COP_OPs.
+ * This is an optional field for devices, but is required when
+ * performing synchronous ops */
+ pfo_resid = of_get_property(of_node, "ibm,resource-id", NULL);
+ if (pfo_resid != NULL)
+ viodev->resource_id = *pfo_resid;
+
+ unit_address = NULL;
+ dev_set_name(&viodev->dev, "%s", of_node_name);
+ viodev->type = of_node_name;
+ viodev->irq = 0;
+ }
- dev_set_name(&viodev->dev, "%x", *unit_address);
viodev->name = of_node->name;
- viodev->type = of_node->type;
- viodev->unit_address = *unit_address;
viodev->dev.of_node = of_node_get(of_node);
if (firmware_has_feature(FW_FEATURE_CMO))
@@ -1265,16 +1422,51 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
}
return viodev;
+
+out: /* Use this exit point for any return prior to device_register */
+ kfree(viodev);
+
+ return NULL;
}
EXPORT_SYMBOL(vio_register_device_node);
+/*
+ * vio_bus_scan_register_devices - Scan OF and register each child device
+ * @root_name - OF node name for the root of the subtree to search.
+ * This must be non-NULL
+ *
+ * Starting from the root node provided, register the device node for
+ * each child beneath the root.
+ */
+static void vio_bus_scan_register_devices(char *root_name)
+{
+ struct device_node *node_root, *node_child;
+
+ if (!root_name)
+ return;
+
+ node_root = of_find_node_by_name(NULL, root_name);
+ if (node_root) {
+
+ /*
+ * Create struct vio_devices for each virtual device in
+ * the device tree. Drivers will associate with them later.
+ */
+ node_child = of_get_next_child(node_root, NULL);
+ while (node_child) {
+ vio_register_device_node(node_child);
+ node_child = of_get_next_child(node_root, node_child);
+ }
+ of_node_put(node_root);
+ }
+}
+
/**
* vio_bus_init: - Initialize the virtual IO bus
*/
static int __init vio_bus_init(void)
{
int err;
- struct device_node *node_vroot;
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_sysfs_init();
@@ -1299,19 +1491,8 @@ static int __init vio_bus_init(void)
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_bus_init();
- node_vroot = of_find_node_by_name(NULL, "vdevice");
- if (node_vroot) {
- struct device_node *of_node;
-
- /*
- * Create struct vio_devices for each virtual device in
- * the device tree. Drivers will associate with them later.
- */
- for (of_node = node_vroot->child; of_node != NULL;
- of_node = of_node->sibling)
- vio_register_device_node(of_node);
- of_node_put(node_vroot);
- }
+ vio_bus_scan_register_devices("vdevice");
+ vio_bus_scan_register_devices("ibm,platform-facilities");
return 0;
}
@@ -1434,12 +1615,28 @@ struct vio_dev *vio_find_node(struct device_node *vnode)
{
const uint32_t *unit_address;
char kobj_name[20];
+ struct device_node *vnode_parent;
+ const char *dev_type;
+
+ vnode_parent = of_get_parent(vnode);
+ if (!vnode_parent)
+ return NULL;
+
+ dev_type = of_get_property(vnode_parent, "device_type", NULL);
+ of_node_put(vnode_parent);
+ if (!dev_type)
+ return NULL;
/* construct the kobject name from the device node */
- unit_address = of_get_property(vnode, "reg", NULL);
- if (!unit_address)
+ if (!strcmp(dev_type, "vdevice")) {
+ unit_address = of_get_property(vnode, "reg", NULL);
+ if (!unit_address)
+ return NULL;
+ snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address);
+ } else if (!strcmp(dev_type, "ibm,platform-facilities"))
+ snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name);
+ else
return NULL;
- snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address);
return vio_find_name(kobj_name);
}