Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig | 69
-rw-r--r--  arch/powerpc/Kconfig.debug | 9
-rw-r--r--  arch/powerpc/Makefile | 1
-rw-r--r--  arch/powerpc/boot/Makefile | 5
-rw-r--r--  arch/powerpc/boot/devtree.c | 2
-rw-r--r--  arch/powerpc/boot/dts/asp834x-redboot.dts | 20
-rw-r--r--  arch/powerpc/boot/dts/bamboo.dts | 3
-rw-r--r--  arch/powerpc/boot/dts/canyonlands.dts | 14
-rw-r--r--  arch/powerpc/boot/dts/gef_sbc610.dts | 11
-rw-r--r--  arch/powerpc/boot/dts/ksi8560.dts | 20
-rw-r--r--  arch/powerpc/boot/dts/kuroboxHD.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/kuroboxHG.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/lite5200.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/lite5200b.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/motionpro.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/mpc8313erdb.dts | 20
-rw-r--r--  arch/powerpc/boot/dts/mpc8315erdb.dts | 20
-rw-r--r--  arch/powerpc/boot/dts/mpc8349emitx.dts | 19
-rw-r--r--  arch/powerpc/boot/dts/mpc8349emitxgp.dts | 6
-rw-r--r--  arch/powerpc/boot/dts/mpc834x_mds.dts | 19
-rw-r--r--  arch/powerpc/boot/dts/mpc836x_mds.dts | 43
-rw-r--r--  arch/powerpc/boot/dts/mpc836x_rdk.dts | 19
-rw-r--r--  arch/powerpc/boot/dts/mpc8377_mds.dts | 19
-rw-r--r--  arch/powerpc/boot/dts/mpc8377_rdb.dts | 20
-rw-r--r--  arch/powerpc/boot/dts/mpc8378_mds.dts | 19
-rw-r--r--  arch/powerpc/boot/dts/mpc8378_rdb.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/mpc8379_mds.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/mpc8379_rdb.dts | 19
-rw-r--r--  arch/powerpc/boot/dts/mpc8536ds.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/mpc8540ads.dts | 31
-rw-r--r--  arch/powerpc/boot/dts/mpc8541cds.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/mpc8544ds.dts | 20
-rw-r--r--  arch/powerpc/boot/dts/mpc8548cds.dts | 44
-rw-r--r--  arch/powerpc/boot/dts/mpc8555cds.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/mpc8560ads.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/mpc8568mds.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/mpc8572ds.dts | 158
-rw-r--r--  arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts | 483
-rw-r--r--  arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts | 234
-rw-r--r--  arch/powerpc/boot/dts/mpc8641_hpcn.dts | 101
-rw-r--r--  arch/powerpc/boot/dts/pcm030.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/sbc8349.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/sbc8548.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/sbc8560.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/sbc8641d.dts | 44
-rw-r--r--  arch/powerpc/boot/dts/sequoia.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/stx_gp3_8560.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/tqm5200.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/tqm8540.dts | 28
-rw-r--r--  arch/powerpc/boot/dts/tqm8541.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/tqm8548-bigflash.dts | 44
-rw-r--r--  arch/powerpc/boot/dts/tqm8548.dts | 44
-rw-r--r--  arch/powerpc/boot/dts/tqm8555.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/tqm8560.dts | 18
-rw-r--r--  arch/powerpc/boot/install.sh | 14
-rw-r--r--  arch/powerpc/boot/libfdt-wrapper.c | 2
-rw-r--r--  arch/powerpc/configs/85xx/mpc8572_ds_defconfig | 43
-rw-r--r--  arch/powerpc/configs/86xx/gef_sbc610_defconfig | 8
-rw-r--r--  arch/powerpc/configs/ppc44x_defconfig | 12
-rw-r--r--  arch/powerpc/include/asm/Kbuild | 1
-rw-r--r--  arch/powerpc/include/asm/atomic.h | 22
-rw-r--r--  arch/powerpc/include/asm/bug.h | 11
-rw-r--r--  arch/powerpc/include/asm/byteorder.h | 77
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 113
-rw-r--r--  arch/powerpc/include/asm/dcr-native.h | 63
-rw-r--r--  arch/powerpc/include/asm/dcr.h | 4
-rw-r--r--  arch/powerpc/include/asm/device.h | 12
-rw-r--r--  arch/powerpc/include/asm/disassemble.h | 80
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h | 156
-rw-r--r--  arch/powerpc/include/asm/eeh.h | 8
-rw-r--r--  arch/powerpc/include/asm/elf.h | 2
-rw-r--r--  arch/powerpc/include/asm/feature-fixups.h | 30
-rw-r--r--  arch/powerpc/include/asm/ftrace.h | 14
-rw-r--r--  arch/powerpc/include/asm/highmem.h | 23
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h | 6
-rw-r--r--  arch/powerpc/include/asm/io.h | 7
-rw-r--r--  arch/powerpc/include/asm/ioctls.h | 2
-rw-r--r--  arch/powerpc/include/asm/kdump.h | 13
-rw-r--r--  arch/powerpc/include/asm/kexec.h | 64
-rw-r--r--  arch/powerpc/include/asm/kvm_44x.h | 61
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 116
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 83
-rw-r--r--  arch/powerpc/include/asm/local.h | 4
-rw-r--r--  arch/powerpc/include/asm/lppaca.h | 3
-rw-r--r--  arch/powerpc/include/asm/mmu-40x.h | 5
-rw-r--r--  arch/powerpc/include/asm/mmu-44x.h | 23
-rw-r--r--  arch/powerpc/include/asm/mmu-8xx.h | 3
-rw-r--r--  arch/powerpc/include/asm/mmu-fsl-booke.h | 7
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 57
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h | 257
-rw-r--r--  arch/powerpc/include/asm/module.h | 16
-rw-r--r--  arch/powerpc/include/asm/mpc52xx.h | 19
-rw-r--r--  arch/powerpc/include/asm/mpc52xx_psc.h | 11
-rw-r--r--  arch/powerpc/include/asm/mutex.h | 135
-rw-r--r--  arch/powerpc/include/asm/page.h | 13
-rw-r--r--  arch/powerpc/include/asm/page_32.h | 7
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 30
-rw-r--r--  arch/powerpc/include/asm/pci.h | 15
-rw-r--r--  arch/powerpc/include/asm/pgalloc-32.h | 11
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h | 34
-rw-r--r--  arch/powerpc/include/asm/pgalloc.h | 41
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc32.h | 42
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 15
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 26
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 4
-rw-r--r--  arch/powerpc/include/asm/processor.h | 8
-rw-r--r--  arch/powerpc/include/asm/prom.h | 3
-rw-r--r--  arch/powerpc/include/asm/ps3.h | 58
-rw-r--r--  arch/powerpc/include/asm/ps3av.h | 4
-rw-r--r--  arch/powerpc/include/asm/qe.h | 37
-rw-r--r--  arch/powerpc/include/asm/qe_ic.h | 21
-rw-r--r--  arch/powerpc/include/asm/reg.h | 4
-rw-r--r--  arch/powerpc/include/asm/rtas.h | 1
-rw-r--r--  arch/powerpc/include/asm/sfp-machine.h | 58
-rw-r--r--  arch/powerpc/include/asm/smp.h | 7
-rw-r--r--  arch/powerpc/include/asm/spinlock.h | 2
-rw-r--r--  arch/powerpc/include/asm/spu.h | 2
-rw-r--r--  arch/powerpc/include/asm/swab.h | 90
-rw-r--r--  arch/powerpc/include/asm/synch.h | 4
-rw-r--r--  arch/powerpc/include/asm/system.h | 24
-rw-r--r--  arch/powerpc/include/asm/time.h | 20
-rw-r--r--  arch/powerpc/include/asm/tlbflush.h | 87
-rw-r--r--  arch/powerpc/include/asm/topology.h | 13
-rw-r--r--  arch/powerpc/include/asm/vdso_datapage.h | 3
-rw-r--r--  arch/powerpc/kernel/Makefile | 7
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 27
-rw-r--r--  arch/powerpc/kernel/cacheinfo.c | 837
-rw-r--r--  arch/powerpc/kernel/cacheinfo.h | 8
-rw-r--r--  arch/powerpc/kernel/cputable.c | 117
-rw-r--r--  arch/powerpc/kernel/dma.c | 26
-rw-r--r--  arch/powerpc/kernel/entry_32.S | 40
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 12
-rw-r--r--  arch/powerpc/kernel/ftrace.c | 461
-rw-r--r--  arch/powerpc/kernel/head_32.S | 31
-rw-r--r--  arch/powerpc/kernel/head_44x.S | 34
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S | 107
-rw-r--r--  arch/powerpc/kernel/ibmebus.c | 3
-rw-r--r--  arch/powerpc/kernel/idle.c | 5
-rw-r--r--  arch/powerpc/kernel/init_task.c | 1
-rw-r--r--  arch/powerpc/kernel/irq.c | 2
-rw-r--r--  arch/powerpc/kernel/kprobes.c | 9
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c | 91
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c | 78
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 238
-rw-r--r--  arch/powerpc/kernel/module.c | 6
-rw-r--r--  arch/powerpc/kernel/module_32.c | 10
-rw-r--r--  arch/powerpc/kernel/module_64.c | 13
-rw-r--r--  arch/powerpc/kernel/of_device.c | 18
-rw-r--r--  arch/powerpc/kernel/paca.c | 1
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 371
-rw-r--r--  arch/powerpc/kernel/pci_32.c | 108
-rw-r--r--  arch/powerpc/kernel/pci_64.c | 143
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 10
-rw-r--r--  arch/powerpc/kernel/ppc_save_regs.S (renamed from arch/powerpc/xmon/setjmp.S) | 2
-rw-r--r--  arch/powerpc/kernel/process.c | 5
-rw-r--r--  arch/powerpc/kernel/prom.c | 61
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 2
-rw-r--r--  arch/powerpc/kernel/prom_parse.c | 12
-rw-r--r--  arch/powerpc/kernel/rtas.c | 26
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c | 48
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 15
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 5
-rw-r--r--  arch/powerpc/kernel/smp-tbsync.c | 12
-rw-r--r--  arch/powerpc/kernel/smp.c | 75
-rw-r--r--  arch/powerpc/kernel/swsusp.c | 2
-rw-r--r--  arch/powerpc/kernel/swsusp_32.S | 6
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 305
-rw-r--r--  arch/powerpc/kernel/time.c | 56
-rw-r--r--  arch/powerpc/kernel/traps.c | 62
-rw-r--r--  arch/powerpc/kernel/vdso.c | 13
-rw-r--r--  arch/powerpc/kernel/vdso32/gettimeofday.S | 208
-rw-r--r--  arch/powerpc/kernel/vdso32/vdso32.lds.S | 3
-rw-r--r--  arch/powerpc/kernel/vdso64/gettimeofday.S | 141
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64.lds.S | 3
-rw-r--r--  arch/powerpc/kernel/vio.c | 12
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 6
-rw-r--r--  arch/powerpc/kvm/44x.c | 228
-rw-r--r--  arch/powerpc/kvm/44x_emulate.c | 371
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c | 463
-rw-r--r--  arch/powerpc/kvm/44x_tlb.h | 26
-rw-r--r--  arch/powerpc/kvm/Kconfig | 28
-rw-r--r--  arch/powerpc/kvm/Makefile | 12
-rw-r--r--  arch/powerpc/kvm/booke.c (renamed from arch/powerpc/kvm/booke_guest.c) | 418
-rw-r--r--  arch/powerpc/kvm/booke.h | 60
-rw-r--r--  arch/powerpc/kvm/booke_host.c | 83
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S | 72
-rw-r--r--  arch/powerpc/kvm/emulate.c | 447
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 133
-rw-r--r--  arch/powerpc/kvm/timing.c | 239
-rw-r--r--  arch/powerpc/kvm/timing.h | 102
-rw-r--r--  arch/powerpc/lib/Makefile | 3
-rw-r--r--  arch/powerpc/lib/copyuser_64.S | 17
-rw-r--r--  arch/powerpc/lib/dma-noncoherent.c | 25
-rw-r--r--  arch/powerpc/lib/memcpy_64.S | 16
-rw-r--r--  arch/powerpc/math-emu/Makefile | 2
-rw-r--r--  arch/powerpc/math-emu/fadd.c | 1
-rw-r--r--  arch/powerpc/math-emu/fcmpo.c | 5
-rw-r--r--  arch/powerpc/math-emu/fdiv.c | 9
-rw-r--r--  arch/powerpc/math-emu/fdivs.c | 9
-rw-r--r--  arch/powerpc/math-emu/fmadd.c | 5
-rw-r--r--  arch/powerpc/math-emu/fmadds.c | 5
-rw-r--r--  arch/powerpc/math-emu/fmsub.c | 5
-rw-r--r--  arch/powerpc/math-emu/fmsubs.c | 5
-rw-r--r--  arch/powerpc/math-emu/fmul.c | 3
-rw-r--r--  arch/powerpc/math-emu/fmuls.c | 3
-rw-r--r--  arch/powerpc/math-emu/fnmadd.c | 5
-rw-r--r--  arch/powerpc/math-emu/fnmadds.c | 5
-rw-r--r--  arch/powerpc/math-emu/fnmsub.c | 5
-rw-r--r--  arch/powerpc/math-emu/fnmsubs.c | 5
-rw-r--r--  arch/powerpc/math-emu/fsqrt.c | 5
-rw-r--r--  arch/powerpc/math-emu/fsqrts.c | 5
-rw-r--r--  arch/powerpc/math-emu/fsub.c | 3
-rw-r--r--  arch/powerpc/math-emu/fsubs.c | 3
-rw-r--r--  arch/powerpc/math-emu/math_efp.c | 720
-rw-r--r--  arch/powerpc/mm/Makefile | 10
-rw-r--r--  arch/powerpc/mm/fault.c | 16
-rw-r--r--  arch/powerpc/mm/hash_low_32.S | 111
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 29
-rw-r--r--  arch/powerpc/mm/init_32.c | 6
-rw-r--r--  arch/powerpc/mm/mem.c | 8
-rw-r--r--  arch/powerpc/mm/mmu_context_32.c | 84
-rw-r--r--  arch/powerpc/mm/mmu_context_hash32.c | 103
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c (renamed from arch/powerpc/mm/mmu_context_64.c) | 8
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c | 397
-rw-r--r--  arch/powerpc/mm/mmu_decl.h | 65
-rw-r--r--  arch/powerpc/mm/numa.c | 62
-rw-r--r--  arch/powerpc/mm/pgtable.c | 117
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 59
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c | 10
-rw-r--r--  arch/powerpc/mm/tlb_hash32.c (renamed from arch/powerpc/mm/tlb_32.c) | 4
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c (renamed from arch/powerpc/mm/tlb_64.c) | 86
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c | 210
-rw-r--r--  arch/powerpc/mm/tlb_nohash_low.S | 166
-rw-r--r--  arch/powerpc/oprofile/cell/pr_util.h | 2
-rw-r--r--  arch/powerpc/oprofile/cell/spu_profiler.c | 2
-rw-r--r--  arch/powerpc/oprofile/cell/spu_task_sync.c | 2
-rw-r--r--  arch/powerpc/platforms/40x/ep405.c | 2
-rw-r--r--  arch/powerpc/platforms/40x/kilauea.c | 2
-rw-r--r--  arch/powerpc/platforms/40x/ppc40x_simple.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/ebony.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/ppc44x_simple.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/sam440ep.c | 2
-rw-r--r--  arch/powerpc/platforms/52xx/lite5200_pm.c | 1
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_common.c | 2
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pci.c | 2
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pic.c | 237
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pic.h | 53
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pm.c | 3
-rw-r--r--  arch/powerpc/platforms/82xx/pq2.c | 2
-rw-r--r--  arch/powerpc/platforms/83xx/mpc831x_rdb.c | 2
-rw-r--r--  arch/powerpc/platforms/83xx/mpc832x_mds.c | 9
-rw-r--r--  arch/powerpc/platforms/83xx/mpc832x_rdb.c | 5
-rw-r--r--  arch/powerpc/platforms/83xx/mpc836x_mds.c | 81
-rw-r--r--  arch/powerpc/platforms/83xx/mpc836x_rdk.c | 6
-rw-r--r--  arch/powerpc/platforms/83xx/mpc837x_mds.c | 1
-rw-r--r--  arch/powerpc/platforms/83xx/mpc837x_rdb.c | 2
-rw-r--r--  arch/powerpc/platforms/83xx/mpc83xx.h | 1
-rw-r--r--  arch/powerpc/platforms/85xx/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ds.c | 18
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c | 8
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 105
-rw-r--r--  arch/powerpc/platforms/86xx/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/86xx/Makefile | 3
-rw-r--r--  arch/powerpc/platforms/86xx/gef_gpio.c | 143
-rw-r--r--  arch/powerpc/platforms/Kconfig | 11
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 14
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig | 23
-rw-r--r--  arch/powerpc/platforms/cell/Makefile | 17
-rw-r--r--  arch/powerpc/platforms/cell/beat_htab.c | 21
-rw-r--r--  arch/powerpc/platforms/cell/beat_udbg.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/celleb_setup.c | 9
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/io-workarounds.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 9
-rw-r--r--  arch/powerpc/platforms/cell/qpace_setup.c | 152
-rw-r--r--  arch/powerpc/platforms/cell/setup.c | 6
-rw-r--r--  arch/powerpc/platforms/cell/spu_priv1_mmio.c | 6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 27
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c | 9
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h | 2
-rw-r--r--  arch/powerpc/platforms/chrp/pci.c | 3
-rw-r--r--  arch/powerpc/platforms/embedded6xx/c2k.c | 6
-rw-r--r--  arch/powerpc/platforms/embedded6xx/prpmc2800.c | 6
-rw-r--r--  arch/powerpc/platforms/iseries/Kconfig | 6
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c | 11
-rw-r--r--  arch/powerpc/platforms/maple/setup.c | 6
-rw-r--r--  arch/powerpc/platforms/pasemi/cpufreq.c | 2
-rw-r--r--  arch/powerpc/platforms/pasemi/dma_lib.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/cpufreq_32.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/pci.c | 8
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c | 10
-rw-r--r--  arch/powerpc/platforms/powermac/sleep.S | 5
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/time.c | 11
-rw-r--r--  arch/powerpc/platforms/ps3/device-init.c | 70
-rw-r--r--  arch/powerpc/platforms/ps3/mm.c | 2
-rw-r--r--  arch/powerpc/platforms/ps3/setup.c | 8
-rw-r--r--  arch/powerpc/platforms/ps3/system-bus.c | 38
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/pseries/cmm.c | 29
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 44
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/pci_dlpar.c | 163
-rw-r--r--  arch/powerpc/platforms/pseries/phyp_dump.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/rtasd.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c | 47
-rw-r--r--  arch/powerpc/sysdev/Makefile | 1
-rw-r--r--  arch/powerpc/sysdev/bestcomm/ata.c | 3
-rw-r--r--  arch/powerpc/sysdev/bestcomm/ata.h | 19
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm.c | 7
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm.h | 61
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm_priv.h | 20
-rw-r--r--  arch/powerpc/sysdev/dcr-low.S | 8
-rw-r--r--  arch/powerpc/sysdev/dcr.c | 5
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 11
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.c | 241
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.h | 5
-rw-r--r--  arch/powerpc/sysdev/grackle.c | 2
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 36
-rw-r--r--  arch/powerpc/sysdev/mpic.h | 2
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.c | 306
-rw-r--r--  arch/powerpc/sysdev/qe_lib/Kconfig | 3
-rw-r--r--  arch/powerpc/sysdev/qe_lib/gpio.c | 195
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe.c | 3
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc.c | 4
-rw-r--r--  arch/powerpc/sysdev/simple_gpio.c | 155
-rw-r--r--  arch/powerpc/sysdev/simple_gpio.h | 12
-rw-r--r--  arch/powerpc/xmon/Makefile | 2
-rw-r--r--  arch/powerpc/xmon/xmon.c | 5
331 files changed, 11260 insertions, 4505 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 525c13a4de93..84b861316ce7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -108,6 +108,8 @@ config ARCH_NO_VIRT_TO_BUS
config PPC
bool
default y
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER
select ARCH_WANT_OPTIONAL_GPIOLIB
select HAVE_IDE
@@ -141,7 +143,7 @@ config GENERIC_NVRAM
bool
default y if PPC32
-config SCHED_NO_NO_OMIT_FRAME_POINTER
+config SCHED_OMIT_FRAME_POINTER
bool
default y
@@ -285,6 +287,10 @@ config IOMMU_VMERGE
config IOMMU_HELPER
def_bool PPC64
+config PPC_NEED_DMA_SYNC_OPS
+ def_bool y
+ depends on NOT_COHERENT_CACHE
+
config HOTPLUG_CPU
bool "Support for enabling/disabling CPUs"
depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
@@ -322,7 +328,8 @@ config KEXEC
config CRASH_DUMP
bool "Build a kdump crash kernel"
- depends on PPC_MULTIPLATFORM && PPC64 && RELOCATABLE
+ depends on PPC64 || 6xx
+ select RELOCATABLE if PPC64
help
Build a kernel suitable for use as a kdump capture kernel.
The same kernel binary can be used as production kernel and dump
@@ -401,23 +408,53 @@ config PPC_HAS_HASH_64K
depends on PPC64
default n
-config PPC_64K_PAGES
- bool "64k page size"
- depends on PPC64
- select PPC_HAS_HASH_64K
+choice
+ prompt "Page size"
+ default PPC_4K_PAGES
help
- This option changes the kernel logical page size to 64k. On machines
- without processor support for 64k pages, the kernel will simulate
- them by loading each individual 4k page on demand transparently,
- while on hardware with such support, it will be used to map
- normal application pages.
+ Select the kernel logical page size. Increasing the page size
+ will reduce software overhead at each page boundary, allow
+ hardware prefetch mechanisms to be more effective, and allow
+ larger dma transfers increasing IO efficiency and reducing
+ overhead. However the utilization of memory will increase.
+ For example, each cached file will use a multiple of the
+ page size to hold its contents and the difference between the
+ end of file and the end of page is wasted.
+
+ Some dedicated systems, such as software raid serving with
+ accelerated calculations, have shown significant increases.
+
+ If you configure a 64 bit kernel for 64k pages but the
+ processor does not support them, then the kernel will simulate
+ them with 4k pages, loading them on demand, but with the
+ reduced software overhead and larger internal fragmentation.
+ For the 32 bit kernel, a large page option will not be offered
+ unless it is supported by the configured processor.
+
+ If unsure, choose 4K_PAGES.
+
+config PPC_4K_PAGES
+ bool "4k page size"
+
+config PPC_16K_PAGES
+ bool "16k page size" if 44x
+
+config PPC_64K_PAGES
+ bool "64k page size" if 44x || PPC_STD_MMU_64
+ select PPC_HAS_HASH_64K if PPC_STD_MMU_64
+
+endchoice
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
- range 9 64 if PPC_64K_PAGES
- default "9" if PPC_64K_PAGES
- range 13 64 if PPC64 && !PPC_64K_PAGES
- default "13" if PPC64 && !PPC_64K_PAGES
+ range 9 64 if PPC_STD_MMU_64 && PPC_64K_PAGES
+ default "9" if PPC_STD_MMU_64 && PPC_64K_PAGES
+ range 13 64 if PPC_STD_MMU_64 && !PPC_64K_PAGES
+ default "13" if PPC_STD_MMU_64 && !PPC_64K_PAGES
+ range 9 64 if PPC_STD_MMU_32 && PPC_16K_PAGES
+ default "9" if PPC_STD_MMU_32 && PPC_16K_PAGES
+ range 7 64 if PPC_STD_MMU_32 && PPC_64K_PAGES
+ default "7" if PPC_STD_MMU_32 && PPC_64K_PAGES
range 11 64
default "11"
help
@@ -437,7 +474,7 @@ config FORCE_MAX_ZONEORDER
config PPC_SUBPAGE_PROT
bool "Support setting protections for 4k subpages"
- depends on PPC_64K_PAGES
+ depends on PPC_STD_MMU_64 && PPC_64K_PAGES
help
This option adds support for a system call to allow user programs
to set access permissions (read/write, readonly, or no access)
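The new page-size help text above trades per-page software overhead against internal fragmentation. A rough, stand-alone illustration of the fragmentation side (plain userspace C, not kernel code) is:

    /* Rough illustration of the internal-fragmentation trade-off described in
     * the page-size help text above; plain userspace C, not kernel code. */
    #include <stdio.h>

    static unsigned long tail_waste(unsigned long file_size, unsigned long page_size)
    {
            unsigned long rem = file_size % page_size;
            return rem ? page_size - rem : 0;   /* unused bytes in the file's last page */
    }

    int main(void)
    {
            unsigned long pages[] = { 4096, 16384, 65536 };    /* 4k, 16k, 64k */
            unsigned long file = 100000;                       /* example file length */
            int i;

            for (i = 0; i < 3; i++)
                    printf("page %6luB -> %5lu wasted bytes\n",
                           pages[i], tail_waste(file, pages[i]));
            return 0;
    }

For a 100000-byte file this prints roughly 2400, 14688 and 31072 wasted bytes for 4k, 16k and 64k pages respectively, which is the memory-utilization cost the help text warns about.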
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 15eb27861fc7..08f7cc0a1953 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -2,6 +2,15 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
+config PRINT_STACK_DEPTH
+ int "Stack depth to print" if DEBUG_KERNEL
+ default 64
+ help
+ This option allows you to set the stack depth that the kernel
+ prints in stack traces. This can be useful if your display is
+ too small and stack traces cause important information to
+ scroll off the screen.
+
config DEBUG_STACKOVERFLOW
bool "Check for stack overflows"
depends on DEBUG_KERNEL
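PRINT_STACK_DEPTH becomes a compile-time bound on how many frames the backtrace code prints. A hypothetical sketch of how such a limit is typically consumed (not the actual powerpc show_stack() code):

    /* Hypothetical sketch only -- not the powerpc backtrace implementation.
     * It shows the usual way a CONFIG_PRINT_STACK_DEPTH bound is consumed:
     * stop following the stack back-chain once the configured depth is hit. */
    #ifndef CONFIG_PRINT_STACK_DEPTH
    #define CONFIG_PRINT_STACK_DEPTH 64
    #endif

    static void dump_frames(unsigned long *sp)
    {
            int depth = 0;

            while (sp && depth < CONFIG_PRINT_STACK_DEPTH) {
                    /* print this frame's saved return address here */
                    sp = (unsigned long *)*sp;      /* follow the back chain */
                    depth++;
            }
    }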
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 1f0667069940..72d17f50e54f 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -107,7 +107,6 @@ KBUILD_CFLAGS += $(call cc-option,-mno-altivec)
# (We use all available options to help semi-broken compilers)
KBUILD_CFLAGS += $(call cc-option,-mno-spe)
KBUILD_CFLAGS += $(call cc-option,-mspe=no)
-KBUILD_CFLAGS += $(call cc-option,-mabi=no-spe)
# Enable unit-at-a-time mode when possible. It shrinks the
# kernel considerably.
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 3d3daa674299..e84df338ea29 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -194,6 +194,7 @@ image-$(CONFIG_PPC_MAPLE) += zImage.pseries
image-$(CONFIG_PPC_IBM_CELL_BLADE) += zImage.pseries
image-$(CONFIG_PPC_PS3) += dtbImage.ps3
image-$(CONFIG_PPC_CELLEB) += zImage.pseries
+image-$(CONFIG_PPC_CELL_QPACE) += zImage.pseries
image-$(CONFIG_PPC_CHRP) += zImage.chrp
image-$(CONFIG_PPC_EFIKA) += zImage.chrp
image-$(CONFIG_PPC_PMAC) += zImage.pmac
@@ -207,7 +208,7 @@ image-$(CONFIG_DEFAULT_UIMAGE) += uImage
#
# Theses are default targets to build images which embed device tree blobs.
# They are only required on boards which do not have FDT support in firmware.
-# Boards with newish u-boot firmare can use the uImage target above
+# Boards with newish u-boot firmware can use the uImage target above
#
# Board ports in arch/powerpc/platform/40x/Kconfig
@@ -355,7 +356,7 @@ $(obj)/zImage.initrd: $(addprefix $(obj)/, $(initrd-y))
@rm -f $@; ln $< $@
install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
- sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $<
+ sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $^
# anything not in $(targets)
clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
diff --git a/arch/powerpc/boot/devtree.c b/arch/powerpc/boot/devtree.c
index 5d12336dc360..a7e21a35c03a 100644
--- a/arch/powerpc/boot/devtree.c
+++ b/arch/powerpc/boot/devtree.c
@@ -213,7 +213,7 @@ static int find_range(u32 *reg, u32 *ranges, int nregaddr,
u32 range_addr[MAX_ADDR_CELLS];
u32 range_size[MAX_ADDR_CELLS];
- copy_val(range_addr, ranges + i, naddr);
+ copy_val(range_addr, ranges + i, nregaddr);
copy_val(range_size, ranges + i + nregaddr + naddr, nsize);
if (compare_reg(reg, range_addr, range_size))
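The one-line fix above matters because a ranges entry mixes two address formats: the child address is nregaddr cells wide while the parent address is naddr cells wide, so copying the child part with naddr corrupts it whenever the two differ. A simplified sketch of the layout find_range() walks, assuming the same flat u32 cell array the boot wrapper uses:

    /* Simplified sketch of one "ranges" entry as walked above (assumption:
     * a flat u32 cell array, as in the boot wrapper):
     *
     *   [ child addr : nregaddr cells ][ parent addr : naddr cells ][ size : nsize cells ]
     *
     * hence the child part must be copied with nregaddr cells, as the fix does. */
    typedef unsigned int u32;           /* stand-in for the boot wrapper's u32 */

    static const u32 *range_entry(const u32 *ranges, int entry,
                                  int nregaddr, int naddr, int nsize)
    {
            /* each entry spans nregaddr + naddr + nsize cells */
            return ranges + entry * (nregaddr + naddr + nsize);
    }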
diff --git a/arch/powerpc/boot/dts/asp834x-redboot.dts b/arch/powerpc/boot/dts/asp834x-redboot.dts
index 6235fca445de..524af7ef9f26 100644
--- a/arch/powerpc/boot/dts/asp834x-redboot.dts
+++ b/arch/powerpc/boot/dts/asp834x-redboot.dts
@@ -199,8 +199,26 @@
reg = <0x2>;
device_type = "ethernet-phy";
};
+
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+
enet0: ethernet@24000 {
cell-index = <0>;
device_type = "network";
@@ -210,6 +228,7 @@
local-mac-address = [ 00 08 e5 11 32 33 ];
interrupts = <32 0x8 33 0x8 34 0x8>;
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
linux,network-index = <0>;
};
@@ -223,6 +242,7 @@
local-mac-address = [ 00 08 e5 11 32 34 ];
interrupts = <35 0x8 36 0x8 37 0x8>;
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
linux,network-index = <1>;
};
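The tbi-handle property added to each ethernet node here (and in the many Freescale board files that follow) points the MAC at its TBI PHY node on the newly described MDIO bus. A hedged sketch of how a driver can resolve such a phandle with the generic OF helpers (the function itself is made up, not copied from the gianfar driver):

    /* Hedged sketch: resolving a "tbi-handle" phandle with the generic OF
     * helpers.  The wrapper function is hypothetical, not gianfar code. */
    #include <linux/of.h>

    static struct device_node *find_tbi_node(struct device_node *enet)
    {
            const phandle *ph = of_get_property(enet, "tbi-handle", NULL);

            if (!ph)
                    return NULL;                    /* board has no TBI PHY wired up */

            return of_find_node_by_phandle(*ph);    /* caller must of_node_put() it */
    }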
diff --git a/arch/powerpc/boot/dts/bamboo.dts b/arch/powerpc/boot/dts/bamboo.dts
index 6ce0cc2c0208..aa68911f6560 100644
--- a/arch/powerpc/boot/dts/bamboo.dts
+++ b/arch/powerpc/boot/dts/bamboo.dts
@@ -269,7 +269,8 @@
* later cannot be changed. Chip supports a second
* IO range but we don't use it for now
*/
- ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x20000000
+ ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x40000000
+ 0x02000000 0x00000000 0x00000000 0x00000000 0xe0000000 0x00000000 0x00100000
0x01000000 0x00000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>;
/* Inbound 2GB range starting at 0 */
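The extra ranges entry added above (and the matching ones in canyonlands.dts below) opens a second outbound PCI memory window. Each entry follows the standard OF PCI binding: a 3-cell PCI address, a 2-cell parent address and a 2-cell size, with the address space encoded in bits 24-25 of the first cell. A small decoder, just to illustrate that encoding (not taken from the kernel's range parser):

    /* Illustration of the space code carried in the first cell of each PCI
     * "ranges" entry above, per the standard OF PCI bus binding. */
    static const char *pci_space_name(unsigned int phys_hi)
    {
            switch ((phys_hi >> 24) & 0x3) {        /* the "ss" space-code bits */
            case 0x0: return "configuration";
            case 0x1: return "I/O";                 /* the 0x01000000 entries */
            case 0x2: return "32-bit memory";       /* the 0x02000000 entries */
            default:  return "64-bit memory";       /* 0x3 */
            }
    }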
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index 79fe412c11c9..8b5ba8261a36 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -40,6 +40,7 @@
d-cache-size = <32768>;
dcr-controller;
dcr-access-method = "native";
+ next-level-cache = <&L2C0>;
};
};
@@ -104,6 +105,16 @@
dcr-reg = <0x00c 0x002>;
};
+ L2C0: l2c {
+ compatible = "ibm,l2-cache-460ex", "ibm,l2-cache";
+ dcr-reg = <0x020 0x008 /* Internal SRAM DCR's */
+ 0x030 0x008>; /* L2 cache DCR's */
+ cache-line-size = <32>; /* 32 bytes */
+ cache-size = <262144>; /* L2, 256K */
+ interrupt-parent = <&UIC1>;
+ interrupts = <11 1>;
+ };
+
plb {
compatible = "ibm,plb-460ex", "ibm,plb4";
#address-cells = <2>;
@@ -343,6 +354,7 @@
* later cannot be changed
*/
ranges = <0x02000000 0x00000000 0x80000000 0x0000000d 0x80000000 0x00000000 0x80000000
+ 0x02000000 0x00000000 0x00000000 0x0000000c 0x0ee00000 0x00000000 0x00100000
0x01000000 0x00000000 0x00000000 0x0000000c 0x08000000 0x00000000 0x00010000>;
/* Inbound 2GB range starting at 0 */
@@ -373,6 +385,7 @@
* later cannot be changed
*/
ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x00000000 0x00000000 0x80000000
+ 0x02000000 0x00000000 0x00000000 0x0000000f 0x00000000 0x00000000 0x00100000
0x01000000 0x00000000 0x00000000 0x0000000f 0x80000000 0x00000000 0x00010000>;
/* Inbound 2GB range starting at 0 */
@@ -414,6 +427,7 @@
* later cannot be changed
*/
ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x80000000 0x00000000 0x80000000
+ 0x02000000 0x00000000 0x00000000 0x0000000f 0x00100000 0x00000000 0x00100000
0x01000000 0x00000000 0x00000000 0x0000000f 0x80010000 0x00000000 0x00010000>;
/* Inbound 2GB range starting at 0 */
diff --git a/arch/powerpc/boot/dts/gef_sbc610.dts b/arch/powerpc/boot/dts/gef_sbc610.dts
index e48cfa740c8a..9708b3423bbd 100644
--- a/arch/powerpc/boot/dts/gef_sbc610.dts
+++ b/arch/powerpc/boot/dts/gef_sbc610.dts
@@ -98,6 +98,12 @@
interrupt-parent = <&mpic>;
};
+ gef_gpio: gpio@7,14000 {
+ #gpio-cells = <2>;
+ compatible = "gef,sbc610-gpio";
+ reg = <0x7 0x14000 0x24>;
+ gpio-controller;
+ };
};
soc@fef00000 {
@@ -119,6 +125,11 @@
interrupt-parent = <&mpic>;
dfsrr;
+ rtc@51 {
+ compatible = "epson,rx8581";
+ reg = <0x00000051>;
+ };
+
eti@6b {
compatible = "dallas,ds1682";
reg = <0x6b>;
diff --git a/arch/powerpc/boot/dts/ksi8560.dts b/arch/powerpc/boot/dts/ksi8560.dts
index 49737589ffc8..3bfff47418db 100644
--- a/arch/powerpc/boot/dts/ksi8560.dts
+++ b/arch/powerpc/boot/dts/ksi8560.dts
@@ -141,8 +141,26 @@
reg = <0x2>;
device_type = "ethernet-phy";
};
+
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+
enet0: ethernet@24000 {
device_type = "network";
model = "TSEC";
@@ -152,6 +170,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&PHY1>;
};
@@ -164,6 +183,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&PHY2>;
};
diff --git a/arch/powerpc/boot/dts/kuroboxHD.dts b/arch/powerpc/boot/dts/kuroboxHD.dts
index 2e5a1a1812b6..8d725d10882f 100644
--- a/arch/powerpc/boot/dts/kuroboxHD.dts
+++ b/arch/powerpc/boot/dts/kuroboxHD.dts
@@ -76,7 +76,6 @@ XXXX add flash parts, rtc, ??
interrupt-parent = <&mpic>;
rtc@32 {
- device_type = "rtc";
compatible = "ricoh,rs5c372a";
reg = <0x32>;
};
diff --git a/arch/powerpc/boot/dts/kuroboxHG.dts b/arch/powerpc/boot/dts/kuroboxHG.dts
index e4916e69ad31..b13a11eb81b0 100644
--- a/arch/powerpc/boot/dts/kuroboxHG.dts
+++ b/arch/powerpc/boot/dts/kuroboxHG.dts
@@ -76,7 +76,6 @@ XXXX add flash parts, rtc, ??
interrupt-parent = <&mpic>;
rtc@32 {
- device_type = "rtc";
compatible = "ricoh,rs5c372a";
reg = <0x32>;
};
diff --git a/arch/powerpc/boot/dts/lite5200.dts b/arch/powerpc/boot/dts/lite5200.dts
index 2cf9a8768f44..3f7a5dce8de0 100644
--- a/arch/powerpc/boot/dts/lite5200.dts
+++ b/arch/powerpc/boot/dts/lite5200.dts
@@ -130,7 +130,6 @@
rtc@800 { // Real time clock
compatible = "fsl,mpc5200-rtc";
- device_type = "rtc";
reg = <0x800 0x100>;
interrupts = <1 5 0 1 6 0>;
interrupt-parent = <&mpc5200_pic>;
diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts
index 7bd5b9c399b8..63e3bb48e843 100644
--- a/arch/powerpc/boot/dts/lite5200b.dts
+++ b/arch/powerpc/boot/dts/lite5200b.dts
@@ -130,7 +130,6 @@
rtc@800 { // Real time clock
compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
- device_type = "rtc";
reg = <0x800 0x100>;
interrupts = <1 5 0 1 6 0>;
interrupt-parent = <&mpc5200_pic>;
diff --git a/arch/powerpc/boot/dts/motionpro.dts b/arch/powerpc/boot/dts/motionpro.dts
index 9e3c921be164..52ba6f98b273 100644
--- a/arch/powerpc/boot/dts/motionpro.dts
+++ b/arch/powerpc/boot/dts/motionpro.dts
@@ -248,7 +248,6 @@
fsl5200-clocking;
rtc@68 {
- device_type = "rtc";
compatible = "dallas,ds1339";
reg = <0x68>;
};
diff --git a/arch/powerpc/boot/dts/mpc8313erdb.dts b/arch/powerpc/boot/dts/mpc8313erdb.dts
index 503031766825..d4df8b6857a4 100644
--- a/arch/powerpc/boot/dts/mpc8313erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8313erdb.dts
@@ -190,6 +190,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <37 0x8 36 0x8 35 0x8>;
interrupt-parent = <&ipic>;
+ tbi-handle = < &tbi0 >;
phy-handle = < &phy1 >;
fsl,magic-packet;
@@ -210,6 +211,10 @@
reg = <0x4>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
};
@@ -222,9 +227,24 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <34 0x8 33 0x8 32 0x8>;
interrupt-parent = <&ipic>;
+ tbi-handle = < &tbi1 >;
phy-handle = < &phy4 >;
sleep = <&pmc 0x10000000>;
fsl,magic-packet;
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+
};
serial0: serial@4500 {
diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts
index 6b850670de1d..072c9b0f8c8e 100644
--- a/arch/powerpc/boot/dts/mpc8315erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8315erdb.dts
@@ -117,7 +117,6 @@
interrupt-parent = <&ipic>;
dfsrr;
rtc@68 {
- device_type = "rtc";
compatible = "dallas,ds1339";
reg = <0x68>;
};
@@ -206,8 +205,25 @@
reg = <0x1>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
+
enet0: ethernet@24000 {
cell-index = <0>;
device_type = "network";
@@ -217,6 +233,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <32 0x8 33 0x8 34 0x8>;
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi0>;
phy-handle = < &phy0 >;
};
@@ -229,6 +246,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 0x8 36 0x8 37 0x8>;
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi1>;
phy-handle = < &phy1 >;
};
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index 4bdbaf4993a1..b5eda94a8e2a 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -85,7 +85,6 @@
dfsrr;
rtc@68 {
- device_type = "rtc";
compatible = "dallas,ds1339";
reg = <0x68>;
interrupts = <18 0x8>;
@@ -184,6 +183,22 @@
reg = <0x1c>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -195,6 +210,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <32 0x8 33 0x8 34 0x8>;
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy1c>;
linux,network-index = <0>;
};
@@ -211,6 +227,7 @@
/* Vitesse 7385 isn't on the MDIO bus */
fixed-link = <1 1 1000 0 0>;
linux,network-index = <1>;
+ tbi-handle = <&tbi1>;
};
serial0: serial@4500 {
diff --git a/arch/powerpc/boot/dts/mpc8349emitxgp.dts b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
index fa40647ee62e..c87a6015e165 100644
--- a/arch/powerpc/boot/dts/mpc8349emitxgp.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
@@ -83,7 +83,6 @@
dfsrr;
rtc@68 {
- device_type = "rtc";
compatible = "dallas,ds1339";
reg = <0x68>;
interrupts = <18 0x8>;
@@ -163,6 +162,10 @@
reg = <0x1c>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -174,6 +177,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <32 0x8 33 0x8 34 0x8>;
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy1c>;
linux,network-index = <0>;
};
diff --git a/arch/powerpc/boot/dts/mpc834x_mds.dts b/arch/powerpc/boot/dts/mpc834x_mds.dts
index c986c541e9bb..d9adba01c09c 100644
--- a/arch/powerpc/boot/dts/mpc834x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc834x_mds.dts
@@ -185,8 +185,25 @@
reg = <0x1>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
+
enet0: ethernet@24000 {
cell-index = <0>;
device_type = "network";
@@ -196,6 +213,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <32 0x8 33 0x8 34 0x8>;
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
linux,network-index = <0>;
};
@@ -209,6 +227,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 0x8 36 0x8 37 0x8>;
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
linux,network-index = <1>;
};
diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts
index 14534d04e4db..6e34f170fa62 100644
--- a/arch/powerpc/boot/dts/mpc836x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc836x_mds.dts
@@ -69,8 +69,18 @@
};
bcsr@1,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
compatible = "fsl,mpc8360mds-bcsr";
reg = <1 0 0x8000>;
+ ranges = <0 1 0 0x8000>;
+
+ bcsr13: gpio-controller@d {
+ #gpio-cells = <2>;
+ compatible = "fsl,mpc8360mds-bcsr-gpio";
+ reg = <0xd 1>;
+ gpio-controller;
+ };
};
};
@@ -195,10 +205,21 @@
};
par_io@1400 {
+ #address-cells = <1>;
+ #size-cells = <1>;
reg = <0x1400 0x100>;
+ ranges = <0 0x1400 0x100>;
device_type = "par_io";
num-ports = <7>;
+ qe_pio_b: gpio-controller@18 {
+ #gpio-cells = <2>;
+ compatible = "fsl,mpc8360-qe-pario-bank",
+ "fsl,mpc8323-qe-pario-bank";
+ reg = <0x18 0x18>;
+ gpio-controller;
+ };
+
pio1: ucc_pin@01 {
pio-map = <
/* port pin dir open_drain assignment has_irq */
@@ -282,6 +303,15 @@
};
};
+ timer@440 {
+ compatible = "fsl,mpc8360-qe-gtm",
+ "fsl,qe-gtm", "fsl,gtm";
+ reg = <0x440 0x40>;
+ clock-frequency = <132000000>;
+ interrupts = <12 13 14 15>;
+ interrupt-parent = <&qeic>;
+ };
+
spi@4c0 {
cell-index = <0>;
compatible = "fsl,spi";
@@ -301,11 +331,20 @@
};
usb@6c0 {
- compatible = "qe_udc";
+ compatible = "fsl,mpc8360-qe-usb",
+ "fsl,mpc8323-qe-usb";
reg = <0x6c0 0x40 0x8b00 0x100>;
interrupts = <11>;
interrupt-parent = <&qeic>;
- mode = "slave";
+ fsl,fullspeed-clock = "clk21";
+ fsl,lowspeed-clock = "brg9";
+ gpios = <&qe_pio_b 2 0 /* USBOE */
+ &qe_pio_b 3 0 /* USBTP */
+ &qe_pio_b 8 0 /* USBTN */
+ &qe_pio_b 9 0 /* USBRP */
+ &qe_pio_b 11 0 /* USBRN */
+ &bcsr13 5 0 /* SPEED */
+ &bcsr13 4 1>; /* POWER */
};
enet0: ucc@2000 {
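With the change above, the QE USB node no longer hard-codes a mode string; instead it names its clocks and hands the board-level signals (USBOE, USBTP, ..., SPEED, POWER) to the driver as a gpios list referencing the new qe_pio_b and bcsr13 controllers. A hedged sketch of consuming one entry of that list with the generic OF GPIO helpers (the function, the index and the polarity choice are illustrative only):

    /* Hedged sketch: fetching and driving one entry of the "gpios" list added
     * to the QE USB node above.  of_get_gpio()/gpio_request() are the generic
     * helpers; everything else here is illustrative. */
    #include <linux/errno.h>
    #include <linux/gpio.h>
    #include <linux/of.h>
    #include <linux/of_gpio.h>

    static int qe_usb_power_on(struct device_node *np)
    {
            int gpio = of_get_gpio(np, 6);          /* 7th entry: <&bcsr13 4 1>, the POWER line */

            if (gpio < 0)
                    return gpio;
            if (gpio_request(gpio, "qe-usb-power"))
                    return -EBUSY;

            return gpio_direction_output(gpio, 0);  /* drive it; polarity is board-specific */
    }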
diff --git a/arch/powerpc/boot/dts/mpc836x_rdk.dts b/arch/powerpc/boot/dts/mpc836x_rdk.dts
index decadf3d9e98..37b789510d68 100644
--- a/arch/powerpc/boot/dts/mpc836x_rdk.dts
+++ b/arch/powerpc/boot/dts/mpc836x_rdk.dts
@@ -218,8 +218,23 @@
reg = <0x440 0x40>;
interrupts = <12 13 14 15>;
interrupt-parent = <&qeic>;
- /* filled by u-boot */
- clock-frequency = <0>;
+ clock-frequency = <166666666>;
+ };
+
+ usb@6c0 {
+ compatible = "fsl,mpc8360-qe-usb",
+ "fsl,mpc8323-qe-usb";
+ reg = <0x6c0 0x40 0x8b00 0x100>;
+ interrupts = <11>;
+ interrupt-parent = <&qeic>;
+ fsl,fullspeed-clock = "clk21";
+ gpios = <&qe_pio_b 2 0 /* USBOE */
+ &qe_pio_b 3 0 /* USBTP */
+ &qe_pio_b 8 0 /* USBTN */
+ &qe_pio_b 9 0 /* USBRP */
+ &qe_pio_b 11 0 /* USBRN */
+ &qe_pio_e 20 0 /* SPEED */
+ &qe_pio_e 21 1 /* POWER */>;
};
spi@4c0 {
diff --git a/arch/powerpc/boot/dts/mpc8377_mds.dts b/arch/powerpc/boot/dts/mpc8377_mds.dts
index 0484561bd2c0..1d14d7052e6d 100644
--- a/arch/powerpc/boot/dts/mpc8377_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8377_mds.dts
@@ -193,8 +193,25 @@
reg = <0x3>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
+
enet0: ethernet@24000 {
cell-index = <0>;
device_type = "network";
@@ -205,6 +222,7 @@
interrupts = <32 0x8 33 0x8 34 0x8>;
phy-connection-type = "mii";
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy2>;
};
@@ -218,6 +236,7 @@
interrupts = <35 0x8 36 0x8 37 0x8>;
phy-connection-type = "mii";
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy3>;
};
diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts
index 435ef3dd022d..9413af3b9925 100644
--- a/arch/powerpc/boot/dts/mpc8377_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts
@@ -117,7 +117,6 @@
interrupt-parent = <&ipic>;
dfsrr;
rtc@68 {
- device_type = "rtc";
compatible = "dallas,ds1339";
reg = <0x68>;
};
@@ -211,8 +210,25 @@
reg = <0x2>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
+
enet0: ethernet@24000 {
cell-index = <0>;
device_type = "network";
@@ -223,6 +239,7 @@
interrupts = <32 0x8 33 0x8 34 0x8>;
phy-connection-type = "mii";
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy2>;
};
@@ -237,6 +254,7 @@
phy-connection-type = "mii";
interrupt-parent = <&ipic>;
fixed-link = <1 1 1000 0 0>;
+ tbi-handle = <&tbi1>;
};
serial0: serial@4500 {
diff --git a/arch/powerpc/boot/dts/mpc8378_mds.dts b/arch/powerpc/boot/dts/mpc8378_mds.dts
index 67a08d2e2ff2..b85fc02682d2 100644
--- a/arch/powerpc/boot/dts/mpc8378_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8378_mds.dts
@@ -232,8 +232,25 @@
reg = <0x3>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
+
enet0: ethernet@24000 {
cell-index = <0>;
device_type = "network";
@@ -244,6 +261,7 @@
interrupts = <32 0x8 33 0x8 34 0x8>;
phy-connection-type = "mii";
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy2>;
};
@@ -257,6 +275,7 @@
interrupts = <35 0x8 36 0x8 37 0x8>;
phy-connection-type = "mii";
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy3>;
};
diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts
index b11e68f56a06..23c10ce22c2c 100644
--- a/arch/powerpc/boot/dts/mpc8378_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts
@@ -117,7 +117,6 @@
interrupt-parent = <&ipic>;
dfsrr;
rtc@68 {
- device_type = "rtc";
compatible = "dallas,ds1339";
reg = <0x68>;
};
@@ -211,8 +210,25 @@
reg = <0x2>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
+
enet0: ethernet@24000 {
cell-index = <0>;
device_type = "network";
diff --git a/arch/powerpc/boot/dts/mpc8379_mds.dts b/arch/powerpc/boot/dts/mpc8379_mds.dts
index 323370a2b5ff..acf06c438dbf 100644
--- a/arch/powerpc/boot/dts/mpc8379_mds.dts
+++ b/arch/powerpc/boot/dts/mpc8379_mds.dts
@@ -232,6 +232,22 @@
reg = <0x3>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -244,6 +260,7 @@
interrupts = <32 0x8 33 0x8 34 0x8>;
phy-connection-type = "mii";
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy2>;
};
@@ -257,6 +274,7 @@
interrupts = <35 0x8 36 0x8 37 0x8>;
phy-connection-type = "mii";
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy3>;
};
diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts
index 337af6ea26d3..72cdc3c4c7e3 100644
--- a/arch/powerpc/boot/dts/mpc8379_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts
@@ -117,7 +117,6 @@
interrupt-parent = <&ipic>;
dfsrr;
rtc@68 {
- device_type = "rtc";
compatible = "dallas,ds1339";
reg = <0x68>;
};
@@ -211,6 +210,22 @@
reg = <0x2>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -223,6 +238,7 @@
interrupts = <32 0x8 33 0x8 34 0x8>;
phy-connection-type = "mii";
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy2>;
};
@@ -237,6 +253,7 @@
phy-connection-type = "mii";
interrupt-parent = <&ipic>;
fixed-link = <1 1 1000 0 0>;
+ tbi-handle = <&tbi1>;
};
serial0: serial@4500 {
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dts b/arch/powerpc/boot/dts/mpc8536ds.dts
index 35db1e5440c7..3c905df1812c 100644
--- a/arch/powerpc/boot/dts/mpc8536ds.dts
+++ b/arch/powerpc/boot/dts/mpc8536ds.dts
@@ -155,6 +155,22 @@
reg = <1>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x26520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
usb@22000 {
@@ -186,6 +202,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy1>;
phy-connection-type = "rgmii-id";
};
@@ -199,6 +216,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <31 2 32 2 33 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy0>;
phy-connection-type = "rgmii-id";
};
diff --git a/arch/powerpc/boot/dts/mpc8540ads.dts b/arch/powerpc/boot/dts/mpc8540ads.dts
index 9568bfaff8f7..79570ffe41b9 100644
--- a/arch/powerpc/boot/dts/mpc8540ads.dts
+++ b/arch/powerpc/boot/dts/mpc8540ads.dts
@@ -150,6 +150,34 @@
reg = <0x3>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x26520 0x20>;
+
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -161,6 +189,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
};
@@ -173,6 +202,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 2 36 2 40 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
};
@@ -185,6 +215,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <41 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi2>;
phy-handle = <&phy3>;
};
diff --git a/arch/powerpc/boot/dts/mpc8541cds.dts b/arch/powerpc/boot/dts/mpc8541cds.dts
index 6480f4fd96e0..221036a8ce23 100644
--- a/arch/powerpc/boot/dts/mpc8541cds.dts
+++ b/arch/powerpc/boot/dts/mpc8541cds.dts
@@ -144,6 +144,22 @@
reg = <0x1>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -155,6 +171,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
};
@@ -167,6 +184,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 2 36 2 40 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
};
diff --git a/arch/powerpc/boot/dts/mpc8544ds.dts b/arch/powerpc/boot/dts/mpc8544ds.dts
index f1fb20737e3e..b9da42105066 100644
--- a/arch/powerpc/boot/dts/mpc8544ds.dts
+++ b/arch/powerpc/boot/dts/mpc8544ds.dts
@@ -116,8 +116,26 @@
reg = <0x1>;
device_type = "ethernet-phy";
};
+
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
+ mdio@26520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x26520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+
dma@21300 {
#address-cells = <1>;
#size-cells = <1>;
@@ -169,6 +187,7 @@
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
phy-handle = <&phy0>;
+ tbi-handle = <&tbi0>;
phy-connection-type = "rgmii-id";
};
@@ -182,6 +201,7 @@
interrupts = <31 2 32 2 33 2>;
interrupt-parent = <&mpic>;
phy-handle = <&phy1>;
+ tbi-handle = <&tbi1>;
phy-connection-type = "rgmii-id";
};
diff --git a/arch/powerpc/boot/dts/mpc8548cds.dts b/arch/powerpc/boot/dts/mpc8548cds.dts
index 431b496270dc..df774a7088ff 100644
--- a/arch/powerpc/boot/dts/mpc8548cds.dts
+++ b/arch/powerpc/boot/dts/mpc8548cds.dts
@@ -172,6 +172,46 @@
reg = <0x3>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x26520 0x20>;
+
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@27520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x27520 0x20>;
+
+ tbi3: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -183,6 +223,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
};
@@ -195,6 +236,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 2 36 2 40 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
};
@@ -208,6 +250,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <31 2 32 2 33 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi2>;
phy-handle = <&phy2>;
};
@@ -220,6 +263,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <37 2 38 2 39 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi3>;
phy-handle = <&phy3>;
};
*/
diff --git a/arch/powerpc/boot/dts/mpc8555cds.dts b/arch/powerpc/boot/dts/mpc8555cds.dts
index d833a5c4f476..053b01e1c93b 100644
--- a/arch/powerpc/boot/dts/mpc8555cds.dts
+++ b/arch/powerpc/boot/dts/mpc8555cds.dts
@@ -144,6 +144,22 @@
reg = <0x1>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -155,6 +171,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
};
@@ -167,6 +184,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 2 36 2 40 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
};
diff --git a/arch/powerpc/boot/dts/mpc8560ads.dts b/arch/powerpc/boot/dts/mpc8560ads.dts
index 4d1f2f284094..11b1bcbe14ce 100644
--- a/arch/powerpc/boot/dts/mpc8560ads.dts
+++ b/arch/powerpc/boot/dts/mpc8560ads.dts
@@ -145,6 +145,22 @@
reg = <0x3>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -156,6 +172,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
};
@@ -168,6 +185,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 2 36 2 40 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
};
diff --git a/arch/powerpc/boot/dts/mpc8568mds.dts b/arch/powerpc/boot/dts/mpc8568mds.dts
index c80158f7741d..1955bd9e113d 100644
--- a/arch/powerpc/boot/dts/mpc8568mds.dts
+++ b/arch/powerpc/boot/dts/mpc8568mds.dts
@@ -179,6 +179,22 @@
reg = <0x3>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -190,6 +206,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy2>;
};
@@ -202,6 +219,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 2 36 2 40 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy3>;
};
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dts b/arch/powerpc/boot/dts/mpc8572ds.dts
index 5c69b2fafd32..21459e161d02 100644
--- a/arch/powerpc/boot/dts/mpc8572ds.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds.dts
@@ -63,6 +63,119 @@
device_type = "memory";
};
+ localbus@ffe05000 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8572-elbc", "fsl,elbc", "simple-bus";
+ reg = <0 0xffe05000 0 0x1000>;
+ interrupts = <19 2>;
+ interrupt-parent = <&mpic>;
+
+ ranges = <0x0 0x0 0x0 0xe8000000 0x08000000
+ 0x1 0x0 0x0 0xe0000000 0x08000000
+ 0x2 0x0 0x0 0xffa00000 0x00040000
+ 0x3 0x0 0x0 0xffdf0000 0x00008000
+ 0x4 0x0 0x0 0xffa40000 0x00040000
+ 0x5 0x0 0x0 0xffa80000 0x00040000
+ 0x6 0x0 0x0 0xffac0000 0x00040000>;
+
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x8000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ ramdisk@0 {
+ reg = <0x0 0x03000000>;
+ read-only;
+ };
+
+ diagnostic@3000000 {
+ reg = <0x03000000 0x00e00000>;
+ read-only;
+ };
+
+ dink@3e00000 {
+ reg = <0x03e00000 0x00200000>;
+ read-only;
+ };
+
+ kernel@4000000 {
+ reg = <0x04000000 0x00400000>;
+ read-only;
+ };
+
+ jffs2@4400000 {
+ reg = <0x04400000 0x03b00000>;
+ };
+
+ dtb@7f00000 {
+ reg = <0x07f00000 0x00080000>;
+ read-only;
+ };
+
+ u-boot@7f80000 {
+ reg = <0x07f80000 0x00080000>;
+ read-only;
+ };
+ };
+
+ nand@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8572-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x2 0x0 0x40000>;
+
+ u-boot@0 {
+ reg = <0x0 0x02000000>;
+ read-only;
+ };
+
+ jffs2@2000000 {
+ reg = <0x02000000 0x10000000>;
+ };
+
+ ramdisk@12000000 {
+ reg = <0x12000000 0x08000000>;
+ read-only;
+ };
+
+ kernel@1a000000 {
+ reg = <0x1a000000 0x04000000>;
+ };
+
+ dtb@1e000000 {
+ reg = <0x1e000000 0x01000000>;
+ read-only;
+ };
+
+ empty@1f000000 {
+ reg = <0x1f000000 0x21000000>;
+ };
+ };
+
+ nand@4,0 {
+ compatible = "fsl,mpc8572-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x4 0x0 0x40000>;
+ };
+
+ nand@5,0 {
+ compatible = "fsl,mpc8572-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x5 0x0 0x40000>;
+ };
+
+ nand@6,0 {
+ compatible = "fsl,mpc8572-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x6 0x0 0x40000>;
+ };
+ };
+
soc8572@ffe00000 {
#address-cells = <1>;
#size-cells = <1>;
@@ -225,6 +338,47 @@
interrupts = <10 1>;
reg = <0x3>;
};
+
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x26520 0x20>;
+
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@27520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x27520 0x20>;
+
+ tbi3: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -236,6 +390,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
phy-connection-type = "rgmii-id";
};
@@ -249,6 +404,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 2 36 2 40 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
phy-connection-type = "rgmii-id";
};
@@ -262,6 +418,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <31 2 32 2 33 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi2>;
phy-handle = <&phy2>;
phy-connection-type = "rgmii-id";
};
@@ -275,6 +432,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <37 2 38 2 39 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi3>;
phy-handle = <&phy3>;
phy-connection-type = "rgmii-id";
};
diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
new file mode 100644
index 000000000000..c114c4ee9931
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
@@ -0,0 +1,483 @@
+/*
+ * MPC8572 DS Core0 Device Tree Source in CAMP mode.
+ *
+ * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
+ * can be shared, all the other devices must be assigned to one core only.
+ * This dts file allows core0 to have memory, l2, i2c, dma1, global-util, eth0,
+ * eth1, crypto, pci0, pci1.
+ *
+ * Copyright 2007, 2008 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/dts-v1/;
+/ {
+ model = "fsl,MPC8572DS";
+ compatible = "fsl,MPC8572DS", "fsl,MPC8572DS-CAMP";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ aliases {
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+ serial0 = &serial0;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,8572@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ d-cache-line-size = <32>; // 32 bytes
+ i-cache-line-size = <32>; // 32 bytes
+ d-cache-size = <0x8000>; // L1, 32K
+ i-cache-size = <0x8000>; // L1, 32K
+ timebase-frequency = <0>;
+ bus-frequency = <0>;
+ clock-frequency = <0>;
+ next-level-cache = <&L2>;
+ };
+
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>; // Filled by U-Boot
+ };
+
+ soc8572@ffe00000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+ ranges = <0x0 0xffe00000 0x100000>;
+ reg = <0xffe00000 0x1000>; // CCSRBAR & soc regs, remove once parse code for immrbase fixed
+ bus-frequency = <0>; // Filled out by uboot.
+
+ memory-controller@2000 {
+ compatible = "fsl,mpc8572-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <18 2>;
+ };
+
+ memory-controller@6000 {
+ compatible = "fsl,mpc8572-memory-controller";
+ reg = <0x6000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <18 2>;
+ };
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,mpc8572-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x80000>; // L2, 512K
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ };
+
+ i2c@3000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x3000 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ i2c@3100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x3100 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ dma@21300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
+ reg = <0x21300 0x4>;
+ ranges = <0x0 0x21100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,mpc8572-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <20 2>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,mpc8572-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupt-parent = <&mpic>;
+ interrupts = <21 2>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,mpc8572-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupt-parent = <&mpic>;
+ interrupts = <22 2>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,mpc8572-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupt-parent = <&mpic>;
+ interrupts = <23 2>;
+ };
+ };
+
+ mdio@24520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-mdio";
+ reg = <0x24520 0x20>;
+
+ phy0: ethernet-phy@0 {
+ interrupt-parent = <&mpic>;
+ interrupts = <10 1>;
+ reg = <0x0>;
+ };
+ phy1: ethernet-phy@1 {
+ interrupt-parent = <&mpic>;
+ interrupts = <10 1>;
+ reg = <0x1>;
+ };
+ };
+
+ enet0: ethernet@24000 {
+ cell-index = <0>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x24000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <29 2 30 2 34 2>;
+ interrupt-parent = <&mpic>;
+ phy-handle = <&phy0>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ enet1: ethernet@25000 {
+ cell-index = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x25000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <35 2 36 2 40 2>;
+ interrupt-parent = <&mpic>;
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ serial0: serial@4500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4500 0x100>;
+ clock-frequency = <0>;
+ };
+
+ global-utilities@e0000 { //global utilities block
+ compatible = "fsl,mpc8572-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+
+ crypto@30000 {
+ compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2",
+ "fsl,sec2.1", "fsl,sec2.0";
+ reg = <0x30000 0x10000>;
+ interrupts = <45 2 58 2>;
+ interrupt-parent = <&mpic>;
+ fsl,num-channels = <4>;
+ fsl,channel-fifo-len = <24>;
+ fsl,exec-units-mask = <0x9fe>;
+ fsl,descriptor-types-mask = <0x3ab0ebf>;
+ };
+
+ mpic: pic@40000 {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x40000 0x40000>;
+ compatible = "chrp,open-pic";
+ device_type = "open-pic";
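+ /* Interrupt sources assigned to core1; this kernel must leave them untouched */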
+ protected-sources = <
+ 31 32 33 37 38 39 /* enet2 enet3 */
+ 76 77 78 79 27 42 /* dma2 pci2 serial */
+ 0xe0 0xe1 0xe2 0xe3 /* msi */
+ 0xe4 0xe5 0xe6 0xe7
+ >;
+ };
+ };
+
+ pci0: pcie@ffe08000 {
+ cell-index = <0>;
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0xffe08000 0x1000>;
+ bus-range = <0 255>;
+ ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x0 0xffc00000 0x0 0x10000>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <24 2>;
+ interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x11 func 0 - PCI slot 1 */
+ 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x11 func 1 - PCI slot 1 */
+ 0x8900 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8900 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8900 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8900 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x11 func 2 - PCI slot 1 */
+ 0x8a00 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8a00 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8a00 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8a00 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x11 func 3 - PCI slot 1 */
+ 0x8b00 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8b00 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8b00 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8b00 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x11 func 4 - PCI slot 1 */
+ 0x8c00 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8c00 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8c00 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8c00 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x11 func 5 - PCI slot 1 */
+ 0x8d00 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8d00 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8d00 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8d00 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x11 func 6 - PCI slot 1 */
+ 0x8e00 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8e00 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8e00 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8e00 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x11 func 7 - PCI slot 1 */
+ 0x8f00 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8f00 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8f00 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8f00 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x12 func 0 - PCI slot 2 */
+ 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1
+ 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1
+ 0x9000 0x0 0x0 0x3 &mpic 0x1 0x1
+ 0x9000 0x0 0x0 0x4 &mpic 0x2 0x1
+
+ /* IDSEL 0x12 func 1 - PCI slot 2 */
+ 0x9100 0x0 0x0 0x1 &mpic 0x3 0x1
+ 0x9100 0x0 0x0 0x2 &mpic 0x4 0x1
+ 0x9100 0x0 0x0 0x3 &mpic 0x1 0x1
+ 0x9100 0x0 0x0 0x4 &mpic 0x2 0x1
+
+ /* IDSEL 0x12 func 2 - PCI slot 2 */
+ 0x9200 0x0 0x0 0x1 &mpic 0x3 0x1
+ 0x9200 0x0 0x0 0x2 &mpic 0x4 0x1
+ 0x9200 0x0 0x0 0x3 &mpic 0x1 0x1
+ 0x9200 0x0 0x0 0x4 &mpic 0x2 0x1
+
+ /* IDSEL 0x12 func 3 - PCI slot 2 */
+ 0x9300 0x0 0x0 0x1 &mpic 0x3 0x1
+ 0x9300 0x0 0x0 0x2 &mpic 0x4 0x1
+ 0x9300 0x0 0x0 0x3 &mpic 0x1 0x1
+ 0x9300 0x0 0x0 0x4 &mpic 0x2 0x1
+
+ /* IDSEL 0x12 func 4 - PCI slot 2 */
+ 0x9400 0x0 0x0 0x1 &mpic 0x3 0x1
+ 0x9400 0x0 0x0 0x2 &mpic 0x4 0x1
+ 0x9400 0x0 0x0 0x3 &mpic 0x1 0x1
+ 0x9400 0x0 0x0 0x4 &mpic 0x2 0x1
+
+ /* IDSEL 0x12 func 5 - PCI slot 2 */
+ 0x9500 0x0 0x0 0x1 &mpic 0x3 0x1
+ 0x9500 0x0 0x0 0x2 &mpic 0x4 0x1
+ 0x9500 0x0 0x0 0x3 &mpic 0x1 0x1
+ 0x9500 0x0 0x0 0x4 &mpic 0x2 0x1
+
+ /* IDSEL 0x12 func 6 - PCI slot 2 */
+ 0x9600 0x0 0x0 0x1 &mpic 0x3 0x1
+ 0x9600 0x0 0x0 0x2 &mpic 0x4 0x1
+ 0x9600 0x0 0x0 0x3 &mpic 0x1 0x1
+ 0x9600 0x0 0x0 0x4 &mpic 0x2 0x1
+
+ /* IDSEL 0x12 func 7 - PCI slot 2 */
+ 0x9700 0x0 0x0 0x1 &mpic 0x3 0x1
+ 0x9700 0x0 0x0 0x2 &mpic 0x4 0x1
+ 0x9700 0x0 0x0 0x3 &mpic 0x1 0x1
+ 0x9700 0x0 0x0 0x4 &mpic 0x2 0x1
+
+ // IDSEL 0x1c USB
+ 0xe000 0x0 0x0 0x1 &i8259 0xc 0x2
+ 0xe100 0x0 0x0 0x2 &i8259 0x9 0x2
+ 0xe200 0x0 0x0 0x3 &i8259 0xa 0x2
+ 0xe300 0x0 0x0 0x4 &i8259 0xb 0x2
+
+ // IDSEL 0x1d Audio
+ 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
+
+ // IDSEL 0x1e Legacy
+ 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
+ 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
+
+ // IDSEL 0x1f IDE/SATA
+ 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
+ 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
+
+ >;
+
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ uli1575@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ isa@1e {
+ device_type = "isa";
+ #interrupt-cells = <2>;
+ #size-cells = <1>;
+ #address-cells = <2>;
+ reg = <0xf000 0x0 0x0 0x0 0x0>;
+ ranges = <0x1 0x0 0x1000000 0x0 0x0
+ 0x1000>;
+ interrupt-parent = <&i8259>;
+
+ i8259: interrupt-controller@20 {
+ reg = <0x1 0x20 0x2
+ 0x1 0xa0 0x2
+ 0x1 0x4d0 0x2>;
+ interrupt-controller;
+ device_type = "interrupt-controller";
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ compatible = "chrp,iic";
+ interrupts = <9 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ i8042@60 {
+ #size-cells = <0>;
+ #address-cells = <1>;
+ reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
+ interrupts = <1 3 12 3>;
+ interrupt-parent =
+ <&i8259>;
+
+ keyboard@0 {
+ reg = <0x0>;
+ compatible = "pnpPNP,303";
+ };
+
+ mouse@1 {
+ reg = <0x1>;
+ compatible = "pnpPNP,f03";
+ };
+ };
+
+ rtc@70 {
+ compatible = "pnpPNP,b00";
+ reg = <0x1 0x70 0x2>;
+ };
+
+ gpio@400 {
+ reg = <0x1 0x400 0x80>;
+ };
+ };
+ };
+ };
+
+ };
+
+ pci1: pcie@ffe09000 {
+ cell-index = <1>;
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0xffe09000 0x1000>;
+ bus-range = <0 255>;
+ ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x0 0xffc10000 0x0 0x10000>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <26 2>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x1
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x1
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1
+ >;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
new file mode 100644
index 000000000000..04ecda18d206
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
@@ -0,0 +1,234 @@
+/*
+ * MPC8572 DS Core1 Device Tree Source in CAMP mode.
+ *
+ * In CAMP mode, each core needs to have its own dts. Only the mpic and L2 cache
+ * can be shared; all the other devices must be assigned to one core only.
+ * This dts allows core1 to have l2, dma2, eth2, eth3, pci2, msi.
+ *
+ * Please note that "-b 1" must be added when compiling core1's dts.
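+ * For example, an illustrative dtc invocation (not part of the kernel build)
+ * would be:
+ *   dtc -I dts -O dtb -b 1 -o mpc8572ds_camp_core1.dtb mpc8572ds_camp_core1.dts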
+ *
+ * Copyright 2007, 2008 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/dts-v1/;
+/ {
+ model = "fsl,MPC8572DS";
+ compatible = "fsl,MPC8572DS", "fsl,MPC8572DS-CAMP";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ aliases {
+ ethernet2 = &enet2;
+ ethernet3 = &enet3;
+ serial0 = &serial0;
+ pci2 = &pci2;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,8572@1 {
+ device_type = "cpu";
+ reg = <0x1>;
+ d-cache-line-size = <32>; // 32 bytes
+ i-cache-line-size = <32>; // 32 bytes
+ d-cache-size = <0x8000>; // L1, 32K
+ i-cache-size = <0x8000>; // L1, 32K
+ timebase-frequency = <0>;
+ bus-frequency = <0>;
+ clock-frequency = <0>;
+ next-level-cache = <&L2>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>; // Filled by U-Boot
+ };
+
+ soc8572@ffe00000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+ ranges = <0x0 0xffe00000 0x100000>;
+ reg = <0xffe00000 0x1000>; // CCSRBAR & soc regs, remove once parse code for immrbase fixed
+ bus-frequency = <0>; // Filled out by uboot.
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,mpc8572-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x80000>; // L2, 512K
+ interrupt-parent = <&mpic>;
+ };
+
+ dma@c300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
+ reg = <0xc300 0x4>;
+ ranges = <0x0 0xc100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,mpc8572-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <76 2>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,mpc8572-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupt-parent = <&mpic>;
+ interrupts = <77 2>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,mpc8572-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupt-parent = <&mpic>;
+ interrupts = <78 2>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,mpc8572-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupt-parent = <&mpic>;
+ interrupts = <79 2>;
+ };
+ };
+
+ mdio@24520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-mdio";
+ reg = <0x24520 0x20>;
+
+ phy2: ethernet-phy@2 {
+ interrupt-parent = <&mpic>;
+ reg = <0x2>;
+ };
+ phy3: ethernet-phy@3 {
+ interrupt-parent = <&mpic>;
+ reg = <0x3>;
+ };
+ };
+
+ enet2: ethernet@26000 {
+ cell-index = <2>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x26000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <31 2 32 2 33 2>;
+ interrupt-parent = <&mpic>;
+ phy-handle = <&phy2>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ enet3: ethernet@27000 {
+ cell-index = <3>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x27000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <37 2 38 2 39 2>;
+ interrupt-parent = <&mpic>;
+ phy-handle = <&phy3>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ msi@41600 {
+ compatible = "fsl,mpc8572-msi", "fsl,mpic-msi";
+ reg = <0x41600 0x80>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe0 0
+ 0xe1 0
+ 0xe2 0
+ 0xe3 0
+ 0xe4 0
+ 0xe5 0
+ 0xe6 0
+ 0xe7 0>;
+ interrupt-parent = <&mpic>;
+ };
+
+ serial0: serial@4600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4600 0x100>;
+ clock-frequency = <0>;
+ };
+
+ mpic: pic@40000 {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x40000 0x40000>;
+ compatible = "chrp,open-pic";
+ device_type = "open-pic";
+ protected-sources = <
+ 18 16 10 42 45 58 /* MEM L2 mdio serial crypto */
+ 29 30 34 35 36 40 /* enet0 enet1 */
+ 24 26 20 21 22 23 /* pcie0 pcie1 dma1 */
+ 43 /* i2c */
+ 0x1 0x2 0x3 0x4 /* pci slot */
+ 0x9 0xa 0xb 0xc /* usb */
+ 0x6 0x7 0xe 0x5 /* Audio legacy SATA */
+ >;
+ };
+ };
+
+ pci2: pcie@ffe0a000 {
+ cell-index = <2>;
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0xffe0a000 0x1000>;
+ bus-range = <0 255>;
+ ranges = <0x2000000 0x0 0xc0000000 0xc0000000 0x0 0x20000000
+ 0x1000000 0x0 0x0 0xffc20000 0x0 0x10000>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <27 2>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1
+ >;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ ranges = <0x2000000 0x0 0xc0000000
+ 0x2000000 0x0 0xc0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/mpc8641_hpcn.dts b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
index d665e767822a..4481532cbe77 100644
--- a/arch/powerpc/boot/dts/mpc8641_hpcn.dts
+++ b/arch/powerpc/boot/dts/mpc8641_hpcn.dts
@@ -26,7 +26,13 @@
serial1 = &serial1;
pci0 = &pci0;
pci1 = &pci1;
- rapidio0 = &rapidio0;
+/*
+ * Only one of RapidIO or PCI can be present, due to HW limitations and
+ * because the two now share address space in the new memory map. The
+ * most likely case is that we have PCI, so comment out the rapidio
+ * node. Leave it here for reference.
+ */
+ /* rapidio0 = &rapidio0; */
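+ /* The corresponding rapidio node is commented out near the end of this file. */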
};
cpus {
@@ -62,18 +68,17 @@
reg = <0x00000000 0x40000000>; // 1G at 0x0
};
- localbus@f8005000 {
+ localbus@ffe05000 {
#address-cells = <2>;
#size-cells = <1>;
compatible = "fsl,mpc8641-localbus", "simple-bus";
- reg = <0xf8005000 0x1000>;
+ reg = <0xffe05000 0x1000>;
interrupts = <19 2>;
interrupt-parent = <&mpic>;
- ranges = <0 0 0xff800000 0x00800000
- 1 0 0xfe000000 0x01000000
- 2 0 0xf8200000 0x00100000
- 3 0 0xf8100000 0x00100000>;
+ ranges = <0 0 0xef800000 0x00800000
+ 2 0 0xffdf8000 0x00008000
+ 3 0 0xffdf0000 0x00008000>;
flash@0,0 {
compatible = "cfi-flash";
@@ -103,13 +108,13 @@
};
};
- soc8641@f8000000 {
+ soc8641@ffe00000 {
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
compatible = "simple-bus";
- ranges = <0x00000000 0xf8000000 0x00100000>;
- reg = <0xf8000000 0x00001000>; // CCSRBAR
+ ranges = <0x00000000 0xffe00000 0x00100000>;
+ reg = <0xffe00000 0x00001000>; // CCSRBAR
bus-frequency = <0>;
i2c@3000 {
@@ -205,8 +210,49 @@
reg = <3>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x26520 0x20>;
+
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@27520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x27520 0x20>;
+
+ tbi3: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
+
enet0: ethernet@24000 {
cell-index = <0>;
device_type = "network";
@@ -216,6 +262,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
phy-connection-type = "rgmii-id";
};
@@ -229,6 +276,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 2 36 2 40 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
phy-connection-type = "rgmii-id";
};
@@ -242,6 +290,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <31 2 32 2 33 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi2>;
phy-handle = <&phy2>;
phy-connection-type = "rgmii-id";
};
@@ -255,6 +304,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <37 2 38 2 39 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi3>;
phy-handle = <&phy3>;
phy-connection-type = "rgmii-id";
};
@@ -295,17 +345,17 @@
};
};
- pci0: pcie@f8008000 {
+ pci0: pcie@ffe08000 {
cell-index = <0>;
compatible = "fsl,mpc8641-pcie";
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
- reg = <0xf8008000 0x1000>;
+ reg = <0xffe08000 0x1000>;
bus-range = <0x0 0xff>;
ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x20000000
- 0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>;
+ 0x01000000 0x0 0x00000000 0xffc00000 0x0 0x00010000>;
clock-frequency = <33333333>;
interrupt-parent = <&mpic>;
interrupts = <24 2>;
@@ -436,7 +486,7 @@
0x01000000 0x0 0x00000000
0x01000000 0x0 0x00000000
- 0x0 0x00100000>;
+ 0x0 0x00010000>;
uli1575@0 {
reg = <0 0 0 0 0>;
#size-cells = <2>;
@@ -446,7 +496,7 @@
0x0 0x20000000
0x01000000 0x0 0x00000000
0x01000000 0x0 0x00000000
- 0x0 0x00100000>;
+ 0x0 0x00010000>;
isa@1e {
device_type = "isa";
#interrupt-cells = <2>;
@@ -504,17 +554,17 @@
};
- pci1: pcie@f8009000 {
+ pci1: pcie@ffe09000 {
cell-index = <1>;
compatible = "fsl,mpc8641-pcie";
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
- reg = <0xf8009000 0x1000>;
+ reg = <0xffe09000 0x1000>;
bus-range = <0 0xff>;
ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000
- 0x01000000 0x0 0x00000000 0xe3000000 0x0 0x00100000>;
+ 0x01000000 0x0 0x00000000 0xffc10000 0x0 0x00010000>;
clock-frequency = <33333333>;
interrupt-parent = <&mpic>;
interrupts = <25 2>;
@@ -537,18 +587,21 @@
0x01000000 0x0 0x00000000
0x01000000 0x0 0x00000000
- 0x0 0x00100000>;
+ 0x0 0x00010000>;
};
};
- rapidio0: rapidio@f80c0000 {
+/*
+ rapidio0: rapidio@ffec0000 {
#address-cells = <2>;
#size-cells = <2>;
compatible = "fsl,rapidio-delta";
- reg = <0xf80c0000 0x20000>;
- ranges = <0 0 0xc0000000 0 0x20000000>;
+ reg = <0xffec0000 0x20000>;
+ ranges = <0 0 0x80000000 0 0x20000000>;
interrupt-parent = <&mpic>;
- /* err_irq bell_outb_irq bell_inb_irq
- msg1_tx_irq msg1_rx_irq msg2_tx_irq msg2_rx_irq */
+ // err_irq bell_outb_irq bell_inb_irq
+ // msg1_tx_irq msg1_rx_irq msg2_tx_irq msg2_rx_irq
interrupts = <48 2 49 2 50 2 53 2 54 2 55 2 56 2>;
};
+*/
+
};
diff --git a/arch/powerpc/boot/dts/pcm030.dts b/arch/powerpc/boot/dts/pcm030.dts
index 7c1bb952360c..be2c11ca0594 100644
--- a/arch/powerpc/boot/dts/pcm030.dts
+++ b/arch/powerpc/boot/dts/pcm030.dts
@@ -143,7 +143,6 @@
rtc@800 { // Real time clock
compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
- device_type = "rtc";
reg = <0x800 0x100>;
interrupts = <0x1 0x5 0x0 0x1 0x6 0x0>;
interrupt-parent = <&mpc5200_pic>;
@@ -301,7 +300,6 @@
interrupt-parent = <&mpc5200_pic>;
fsl5200-clocking;
rtc@51 {
- device_type = "rtc";
compatible = "nxp,pcf8563";
reg = <0x51>;
};
diff --git a/arch/powerpc/boot/dts/sbc8349.dts b/arch/powerpc/boot/dts/sbc8349.dts
index 0f941f310e44..8d365a57ebc1 100644
--- a/arch/powerpc/boot/dts/sbc8349.dts
+++ b/arch/powerpc/boot/dts/sbc8349.dts
@@ -177,6 +177,22 @@
reg = <0x1a>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -188,6 +204,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <32 0x8 33 0x8 34 0x8>;
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
linux,network-index = <0>;
};
@@ -201,6 +218,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 0x8 36 0x8 37 0x8>;
interrupt-parent = <&ipic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
linux,network-index = <1>;
};
diff --git a/arch/powerpc/boot/dts/sbc8548.dts b/arch/powerpc/boot/dts/sbc8548.dts
index 333552b4e90d..2baf4a51f224 100644
--- a/arch/powerpc/boot/dts/sbc8548.dts
+++ b/arch/powerpc/boot/dts/sbc8548.dts
@@ -252,6 +252,22 @@
reg = <0x1a>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -263,6 +279,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
};
@@ -275,6 +292,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
};
diff --git a/arch/powerpc/boot/dts/sbc8560.dts b/arch/powerpc/boot/dts/sbc8560.dts
index db3632ef9888..01542f7062ab 100644
--- a/arch/powerpc/boot/dts/sbc8560.dts
+++ b/arch/powerpc/boot/dts/sbc8560.dts
@@ -168,6 +168,22 @@
reg = <0x1c>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -179,6 +195,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
};
@@ -191,6 +208,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
};
diff --git a/arch/powerpc/boot/dts/sbc8641d.dts b/arch/powerpc/boot/dts/sbc8641d.dts
index 9652456158fb..36db981548e4 100644
--- a/arch/powerpc/boot/dts/sbc8641d.dts
+++ b/arch/powerpc/boot/dts/sbc8641d.dts
@@ -222,6 +222,46 @@
reg = <2>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x26520 0x20>;
+
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@27520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x27520 0x20>;
+
+ tbi3: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -233,6 +273,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy0>;
phy-connection-type = "rgmii-id";
};
@@ -246,6 +287,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 2 36 2 40 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
phy-connection-type = "rgmii-id";
};
@@ -259,6 +301,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <31 2 32 2 33 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi2>;
phy-handle = <&phy2>;
phy-connection-type = "rgmii-id";
};
@@ -272,6 +315,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <37 2 38 2 39 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi3>;
phy-handle = <&phy3>;
phy-connection-type = "rgmii-id";
};
diff --git a/arch/powerpc/boot/dts/sequoia.dts b/arch/powerpc/boot/dts/sequoia.dts
index 3b295e8df53f..43cc68bd3192 100644
--- a/arch/powerpc/boot/dts/sequoia.dts
+++ b/arch/powerpc/boot/dts/sequoia.dts
@@ -134,7 +134,7 @@
};
USB1: usb@e0000400 {
- compatible = "ohci-be";
+ compatible = "ibm,usb-ohci-440epx", "ohci-be";
reg = <0x00000000 0xe0000400 0x00000060>;
interrupt-parent = <&UIC0>;
interrupts = <0x15 0x8>;
diff --git a/arch/powerpc/boot/dts/stx_gp3_8560.dts b/arch/powerpc/boot/dts/stx_gp3_8560.dts
index fcd1db6ca0a8..fff33fe6efc6 100644
--- a/arch/powerpc/boot/dts/stx_gp3_8560.dts
+++ b/arch/powerpc/boot/dts/stx_gp3_8560.dts
@@ -142,6 +142,22 @@
reg = <4>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -153,6 +169,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy2>;
};
@@ -165,6 +182,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 2 36 2 40 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy4>;
};
diff --git a/arch/powerpc/boot/dts/tqm5200.dts b/arch/powerpc/boot/dts/tqm5200.dts
index 3008bf8830c1..906302e26a62 100644
--- a/arch/powerpc/boot/dts/tqm5200.dts
+++ b/arch/powerpc/boot/dts/tqm5200.dts
@@ -181,7 +181,6 @@
fsl5200-clocking;
rtc@68 {
- device_type = "rtc";
compatible = "dallas,ds1307";
reg = <0x68>;
};
diff --git a/arch/powerpc/boot/dts/tqm8540.dts b/arch/powerpc/boot/dts/tqm8540.dts
index e1d260b9085e..a693f01c21aa 100644
--- a/arch/powerpc/boot/dts/tqm8540.dts
+++ b/arch/powerpc/boot/dts/tqm8540.dts
@@ -155,6 +155,34 @@
reg = <3>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x26520 0x20>;
+
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
diff --git a/arch/powerpc/boot/dts/tqm8541.dts b/arch/powerpc/boot/dts/tqm8541.dts
index d76441ec5dc7..9e3f5f0dde20 100644
--- a/arch/powerpc/boot/dts/tqm8541.dts
+++ b/arch/powerpc/boot/dts/tqm8541.dts
@@ -154,6 +154,22 @@
reg = <3>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -165,6 +181,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy2>;
};
@@ -177,6 +194,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 2 36 2 40 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
};
diff --git a/arch/powerpc/boot/dts/tqm8548-bigflash.dts b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
index 4199e89b4e50..15086eb65c50 100644
--- a/arch/powerpc/boot/dts/tqm8548-bigflash.dts
+++ b/arch/powerpc/boot/dts/tqm8548-bigflash.dts
@@ -179,6 +179,46 @@
reg = <5>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x26520 0x20>;
+
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@27520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x27520 0x20>;
+
+ tbi3: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -190,6 +230,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy2>;
};
@@ -202,6 +243,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 2 36 2 40 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
};
@@ -214,6 +256,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <31 2 32 2 33 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi2>;
phy-handle = <&phy3>;
};
@@ -226,6 +269,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <37 2 38 2 39 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi3>;
phy-handle = <&phy4>;
};
diff --git a/arch/powerpc/boot/dts/tqm8548.dts b/arch/powerpc/boot/dts/tqm8548.dts
index 58ee4185454b..b7b65f5e79b6 100644
--- a/arch/powerpc/boot/dts/tqm8548.dts
+++ b/arch/powerpc/boot/dts/tqm8548.dts
@@ -179,6 +179,46 @@
reg = <5>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x26520 0x20>;
+
+ tbi2: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@27520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x27520 0x20>;
+
+ tbi3: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -190,6 +230,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy2>;
};
@@ -202,6 +243,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 2 36 2 40 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
};
@@ -214,6 +256,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <31 2 32 2 33 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi2>;
phy-handle = <&phy3>;
};
@@ -226,6 +269,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <37 2 38 2 39 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi3>;
phy-handle = <&phy4>;
};
diff --git a/arch/powerpc/boot/dts/tqm8555.dts b/arch/powerpc/boot/dts/tqm8555.dts
index 6f7ea59c4846..cf92b4e7945e 100644
--- a/arch/powerpc/boot/dts/tqm8555.dts
+++ b/arch/powerpc/boot/dts/tqm8555.dts
@@ -154,6 +154,22 @@
reg = <3>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -165,6 +181,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy2>;
};
@@ -177,6 +194,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 2 36 2 40 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
};
diff --git a/arch/powerpc/boot/dts/tqm8560.dts b/arch/powerpc/boot/dts/tqm8560.dts
index 3fe35208907b..9e1ab2d2f669 100644
--- a/arch/powerpc/boot/dts/tqm8560.dts
+++ b/arch/powerpc/boot/dts/tqm8560.dts
@@ -156,6 +156,22 @@
reg = <3>;
device_type = "ethernet-phy";
};
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@25520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-tbi";
+ reg = <0x25520 0x20>;
+
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
};
enet0: ethernet@24000 {
@@ -167,6 +183,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <29 2 30 2 34 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi0>;
phy-handle = <&phy2>;
};
@@ -179,6 +196,7 @@
local-mac-address = [ 00 00 00 00 00 00 ];
interrupts = <35 2 36 2 40 2>;
interrupt-parent = <&mpic>;
+ tbi-handle = <&tbi1>;
phy-handle = <&phy1>;
};
diff --git a/arch/powerpc/boot/install.sh b/arch/powerpc/boot/install.sh
index b002bfd56786..51b2387bdba0 100644
--- a/arch/powerpc/boot/install.sh
+++ b/arch/powerpc/boot/install.sh
@@ -15,7 +15,7 @@
# $2 - kernel image file
# $3 - kernel map file
# $4 - default install path (blank if root directory)
-# $5 - kernel boot file, the zImage
+# $5 and more - kernel boot files; zImage*, uImage, cuImage.*, etc.
#
# User may have a custom install script
@@ -38,3 +38,15 @@ fi
cat $2 > $4/$image_name
cp $3 $4/System.map
+
+# Copy all the bootable image files
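+# Each extra argument is copied into $4; an existing copy is saved as <name>.old first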
+path=$4
+shift 4
+while [ $# -ne 0 ]; do
+ image_name=`basename $1`
+ if [ -f $path/$image_name ]; then
+ mv $path/$image_name $path/$image_name.old
+ fi
+ cat $1 > $path/$image_name
+ shift
+done;
diff --git a/arch/powerpc/boot/libfdt-wrapper.c b/arch/powerpc/boot/libfdt-wrapper.c
index 9276327bc2bb..bb8b9b3505ee 100644
--- a/arch/powerpc/boot/libfdt-wrapper.c
+++ b/arch/powerpc/boot/libfdt-wrapper.c
@@ -185,7 +185,7 @@ void fdt_init(void *blob)
/* Make sure the dt blob is the right version and so forth */
fdt = blob;
- bufsize = fdt_totalsize(fdt) + 4;
+ bufsize = fdt_totalsize(fdt) + EXPAND_GRANULARITY;
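+ /* the EXPAND_GRANULARITY of headroom gives the relocated tree room to grow */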
buf = malloc(bufsize);
if(!buf)
fatal("malloc failed. can't relocate the device tree\n\r");
diff --git a/arch/powerpc/configs/85xx/mpc8572_ds_defconfig b/arch/powerpc/configs/85xx/mpc8572_ds_defconfig
index 635588319e0d..32aeb79216f7 100644
--- a/arch/powerpc/configs/85xx/mpc8572_ds_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8572_ds_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.28-rc3
-# Sat Nov 8 12:40:13 2008
+# Linux kernel version: 2.6.28-rc8
+# Tue Dec 30 11:17:46 2008
#
# CONFIG_PPC64 is not set
@@ -21,7 +21,10 @@ CONFIG_FSL_BOOKE=y
CONFIG_FSL_EMB_PERFMON=y
# CONFIG_PHYS_64BIT is not set
CONFIG_SPE=y
+CONFIG_PPC_MMU_NOHASH=y
# CONFIG_PPC_MM_SLICES is not set
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
CONFIG_PPC32=y
CONFIG_WORD_SIZE=32
# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
@@ -50,7 +53,7 @@ CONFIG_ARCH_MAY_HAVE_PC_FDC=y
CONFIG_PPC_OF=y
CONFIG_OF=y
CONFIG_PPC_UDBG_16550=y
-# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_GENERIC_TBSYNC=y
CONFIG_AUDIT_ARCH=y
CONFIG_GENERIC_BUG=y
CONFIG_DEFAULT_UIMAGE=y
@@ -62,7 +65,7 @@ CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
# General setup
#
CONFIG_EXPERIMENTAL=y
-CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
@@ -126,6 +129,7 @@ CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
@@ -138,6 +142,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_KMOD=y
+CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
CONFIG_LBD=y
# CONFIG_BLK_DEV_IO_TRACE is not set
@@ -197,6 +202,7 @@ CONFIG_PPC_I8259=y
# CONFIG_CPM2 is not set
CONFIG_FSL_ULI1575=y
# CONFIG_MPC8xxx_GPIO is not set
+# CONFIG_SIMPLE_GPIO is not set
#
# Kernel options
@@ -224,6 +230,7 @@ CONFIG_MATH_EMULATION=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_HAS_WALK_MEMORY=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+# CONFIG_IRQ_ALL_CPUS is not set
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_SELECT_MEMORY_MODEL=y
@@ -241,6 +248,9 @@ CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
CONFIG_UNEVICTABLE_LRU=y
+CONFIG_PPC_4K_PAGES=y
+# CONFIG_PPC_16K_PAGES is not set
+# CONFIG_PPC_64K_PAGES is not set
CONFIG_FORCE_MAX_ZONEORDER=11
CONFIG_PROC_DEVICETREE=y
# CONFIG_CMDLINE_BOOL is not set
@@ -443,8 +453,10 @@ CONFIG_MISC_DEVICES=y
# CONFIG_EEPROM_93CX6 is not set
# CONFIG_SGI_IOC4 is not set
# CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_HP_ILO is not set
+# CONFIG_C2PORT is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
@@ -784,6 +796,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_HVC_UDBG is not set
# CONFIG_IPMI_HANDLER is not set
CONFIG_HW_RANDOM=y
CONFIG_NVRAM=y
@@ -869,11 +882,11 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
# CONFIG_THERMAL is not set
# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
-CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
@@ -886,14 +899,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_PMIC_DA903X is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM8350_I2C is not set
-
-#
-# Voltage and Current regulators
-#
# CONFIG_REGULATOR is not set
-# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
-# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
-# CONFIG_REGULATOR_BQ24022 is not set
#
# Multimedia devices
@@ -1252,11 +1258,11 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_TMC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
#
#
-# may also be needed; see USB_STORAGE Help for more information
+# see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_DEBUG is not set
@@ -1348,6 +1354,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
#
# SPI RTC drivers
@@ -1624,6 +1631,7 @@ CONFIG_HAVE_FUNCTION_TRACER=y
# CONFIG_SAMPLES is not set
CONFIG_HAVE_ARCH_KGDB=y
# CONFIG_KGDB is not set
+CONFIG_PRINT_STACK_DEPTH=64
# CONFIG_DEBUG_STACKOVERFLOW is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_DEBUG_PAGEALLOC is not set
@@ -1649,11 +1657,16 @@ CONFIG_CRYPTO=y
#
# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
-CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_GF128MUL is not set
# CONFIG_CRYPTO_NULL is not set
# CONFIG_CRYPTO_CRYPTD is not set
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
index 07ccaf89f379..cd1ffa449327 100644
--- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
@@ -1397,8 +1397,11 @@ CONFIG_USB_STORAGE=y
# CONFIG_ACCESSIBILITY is not set
# CONFIG_INFINIBAND is not set
# CONFIG_EDAC is not set
-CONFIG_RTC_LIB=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
#
# RTC interfaces
@@ -1424,6 +1427,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
+CONFIG_RTC_DRV_RX8581=y
#
# SPI RTC drivers
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index cfc94cfcf4cb..034a1fbdc887 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -267,7 +267,7 @@ CONFIG_PCI_SYSCALL=y
# CONFIG_PCIEPORTBUS is not set
CONFIG_ARCH_SUPPORTS_MSI=y
# CONFIG_PCI_MSI is not set
-CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_LEGACY is not set
# CONFIG_PCI_DEBUG is not set
# CONFIG_PCCARD is not set
# CONFIG_HOTPLUG_PCI is not set
@@ -354,7 +354,7 @@ CONFIG_IPV6_NDISC_NODETYPE=y
# CONFIG_IP_SCTP is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
+CONFIG_BRIDGE=m
# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
@@ -579,7 +579,7 @@ CONFIG_NETDEVICES=y
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
+CONFIG_TUN=m
# CONFIG_VETH is not set
# CONFIG_ARCNET is not set
# CONFIG_PHYLIB is not set
@@ -1001,11 +1001,11 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_TMC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
#
#
-# may also be needed; see USB_STORAGE Help for more information
+# see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
@@ -1418,6 +1418,6 @@ CONFIG_CRYPTO_LZO=m
# CONFIG_PPC_CLOCK is not set
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
-CONFIG_KVM_BOOKE_HOST=y
+CONFIG_KVM_440=y
# CONFIG_VIRTIO_PCI is not set
# CONFIG_VIRTIO_BALLOON is not set
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 5ab7d7fe198c..9268602de5d0 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -35,3 +35,4 @@ unifdef-y += spu_info.h
unifdef-y += termios.h
unifdef-y += types.h
unifdef-y += unistd.h
+unifdef-y += swab.h
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index f3fc733758f5..b401950f5259 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -5,7 +5,7 @@
* PowerPC atomic operations
*/
-typedef struct { int counter; } atomic_t;
+#include <linux/types.h>
#ifdef __KERNEL__
#include <linux/compiler.h>
@@ -111,7 +111,7 @@ static __inline__ void atomic_inc(atomic_t *v)
bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
- : "cc");
+ : "cc", "xer");
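+ /* addic modifies the carry bit in XER, so "xer" must be in the clobber list */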
}
static __inline__ int atomic_inc_return(atomic_t *v)
@@ -128,7 +128,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
ISYNC_ON_SMP
: "=&r" (t)
: "r" (&v->counter)
- : "cc", "memory");
+ : "cc", "xer", "memory");
return t;
}
@@ -155,7 +155,7 @@ static __inline__ void atomic_dec(atomic_t *v)
bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
- : "cc");
+ : "cc", "xer");
}
static __inline__ int atomic_dec_return(atomic_t *v)
@@ -172,7 +172,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
ISYNC_ON_SMP
: "=&r" (t)
: "r" (&v->counter)
- : "cc", "memory");
+ : "cc", "xer", "memory");
return t;
}
@@ -251,8 +251,6 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
#ifdef __powerpc64__
-typedef struct { long counter; } atomic64_t;
-
#define ATOMIC64_INIT(i) { (i) }
static __inline__ long atomic64_read(const atomic64_t *v)
@@ -346,7 +344,7 @@ static __inline__ void atomic64_inc(atomic64_t *v)
bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
- : "cc");
+ : "cc", "xer");
}
static __inline__ long atomic64_inc_return(atomic64_t *v)
@@ -362,7 +360,7 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
ISYNC_ON_SMP
: "=&r" (t)
: "r" (&v->counter)
- : "cc", "memory");
+ : "cc", "xer", "memory");
return t;
}
@@ -388,7 +386,7 @@ static __inline__ void atomic64_dec(atomic64_t *v)
bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
- : "cc");
+ : "cc", "xer");
}
static __inline__ long atomic64_dec_return(atomic64_t *v)
@@ -404,7 +402,7 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
ISYNC_ON_SMP
: "=&r" (t)
: "r" (&v->counter)
- : "cc", "memory");
+ : "cc", "xer", "memory");
return t;
}
@@ -431,7 +429,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
"\n\
2:" : "=&r" (t)
: "r" (&v->counter)
- : "cc", "memory");
+ : "cc", "xer", "memory");
return t;
}
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index e55d1f66b86f..64e1fdca233e 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -3,6 +3,7 @@
#ifdef __KERNEL__
#include <asm/asm-compat.h>
+
/*
* Define an illegal instr to trap on the bug.
* We don't use 0 because that marks the end of a function
@@ -14,6 +15,7 @@
#ifdef CONFIG_BUG
#ifdef __ASSEMBLY__
+#include <asm/asm-offsets.h>
#ifdef CONFIG_DEBUG_BUGVERBOSE
.macro EMIT_BUG_ENTRY addr,file,line,flags
.section __bug_table,"a"
@@ -26,7 +28,7 @@
.previous
.endm
#else
- .macro EMIT_BUG_ENTRY addr,file,line,flags
+.macro EMIT_BUG_ENTRY addr,file,line,flags
.section __bug_table,"a"
5001: PPC_LONG \addr
.short \flags
@@ -113,6 +115,13 @@
#define HAVE_ARCH_BUG_ON
#define HAVE_ARCH_WARN_ON
#endif /* __ASSEMBLY __ */
+#else
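+/* CONFIG_BUG=n: provide empty stubs so existing asm and C users still build */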
+#ifdef __ASSEMBLY__
+.macro EMIT_BUG_ENTRY addr,file,line,flags
+.endm
+#else /* !__ASSEMBLY__ */
+#define _EMIT_BUG_ENTRY
+#endif
#endif /* CONFIG_BUG */
#include <asm-generic/bug.h>
diff --git a/arch/powerpc/include/asm/byteorder.h b/arch/powerpc/include/asm/byteorder.h
index b37752214a16..5cca27a41532 100644
--- a/arch/powerpc/include/asm/byteorder.h
+++ b/arch/powerpc/include/asm/byteorder.h
@@ -8,82 +8,7 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <asm/types.h>
-#include <linux/compiler.h>
-
-#ifdef __GNUC__
-#ifdef __KERNEL__
-
-static __inline__ __u16 ld_le16(const volatile __u16 *addr)
-{
- __u16 val;
-
- __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
- return val;
-}
-
-static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
-{
- __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
-static __inline__ __u32 ld_le32(const volatile __u32 *addr)
-{
- __u32 val;
-
- __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
- return val;
-}
-
-static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
-{
- __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
-static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
-{
- __u16 result;
-
- __asm__("rlwimi %0,%1,8,16,23"
- : "=r" (result)
- : "r" (value), "0" (value >> 8));
- return result;
-}
-
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
-{
- __u32 result;
-
- __asm__("rlwimi %0,%1,24,16,23\n\t"
- "rlwimi %0,%1,8,8,15\n\t"
- "rlwimi %0,%1,24,0,7"
- : "=r" (result)
- : "r" (value), "0" (value >> 24));
- return result;
-}
-
-#define __arch__swab16(x) ___arch__swab16(x)
-#define __arch__swab32(x) ___arch__swab32(x)
-
-/* The same, but returns converted value from the location pointer by addr. */
-#define __arch__swab16p(addr) ld_le16(addr)
-#define __arch__swab32p(addr) ld_le32(addr)
-
-/* The same, but do the conversion in situ, ie. put the value back to addr. */
-#define __arch__swab16s(addr) st_le16(addr,*addr)
-#define __arch__swab32s(addr) st_le32(addr,*addr)
-
-#endif /* __KERNEL__ */
-
-#ifndef __STRICT_ANSI__
-#define __BYTEORDER_HAS_U64__
-#ifndef __powerpc64__
-#define __SWAB_64_THRU_32__
-#endif /* __powerpc64__ */
-#endif /* __STRICT_ANSI__ */
-
-#endif /* __GNUC__ */
-
+#include <asm/swab.h>
#include <linux/byteorder/big_endian.h>
#endif /* _ASM_POWERPC_BYTEORDER_H */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 1e94b07a020e..4911104791c3 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -82,6 +82,7 @@ struct cpu_spec {
char *cpu_name;
unsigned long cpu_features; /* Kernel features */
unsigned int cpu_user_features; /* Userland features */
+ unsigned int mmu_features; /* MMU features */
/* cache line sizes */
unsigned int icache_bsize;
@@ -144,17 +145,14 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040)
#define CPU_FTR_L2CSR ASM_CONST(0x0000000000000080)
#define CPU_FTR_601 ASM_CONST(0x0000000000000100)
-#define CPU_FTR_HPTE_TABLE ASM_CONST(0x0000000000000200)
#define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400)
#define CPU_FTR_L3CR ASM_CONST(0x0000000000000800)
#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000)
#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000)
#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000)
#define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000)
-#define CPU_FTR_HAS_HIGH_BATS ASM_CONST(0x0000000000010000)
#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
-#define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000)
#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000)
#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000)
@@ -163,6 +161,8 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_SPE ASM_CONST(0x0000000002000000)
#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000)
#define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000)
+#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000010000000)
+#define CPU_FTR_INDEXED_DCR ASM_CONST(0x0000000020000000)
/*
* Add the 64-bit processor unique features in the top half of the word;
@@ -177,7 +177,6 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_SLB LONG_ASM_CONST(0x0000000100000000)
#define CPU_FTR_16M_PAGE LONG_ASM_CONST(0x0000000200000000)
#define CPU_FTR_TLBIEL LONG_ASM_CONST(0x0000000400000000)
-#define CPU_FTR_NOEXECUTE LONG_ASM_CONST(0x0000000800000000)
#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000)
#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000)
#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000)
@@ -194,6 +193,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000)
#define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000)
#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000)
+#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000)
#ifndef __ASSEMBLY__
@@ -264,164 +264,159 @@ extern const char *powerpc_base_platform;
!defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
!defined(CONFIG_BOOKE))
-#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE | \
+#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE)
#define CPU_FTRS_603 (CPU_FTR_COMMON | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
#define CPU_FTRS_604 (CPU_FTR_COMMON | \
- CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_PPC_LE)
+ CPU_FTR_USE_TB | CPU_FTR_PPC_LE)
#define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
#define CPU_FTRS_740 (CPU_FTR_COMMON | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
- CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
+ CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \
CPU_FTR_PPC_LE)
#define CPU_FTRS_750 (CPU_FTR_COMMON | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
- CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
+ CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \
CPU_FTR_PPC_LE)
-#define CPU_FTRS_750CL (CPU_FTRS_750 | CPU_FTR_HAS_HIGH_BATS)
+#define CPU_FTRS_750CL (CPU_FTRS_750)
#define CPU_FTRS_750FX1 (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM)
#define CPU_FTRS_750FX2 (CPU_FTRS_750 | CPU_FTR_NO_DPM)
-#define CPU_FTRS_750FX (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | \
- CPU_FTR_HAS_HIGH_BATS)
+#define CPU_FTRS_750FX (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX)
#define CPU_FTRS_750GX (CPU_FTRS_750FX)
#define CPU_FTRS_7400_NOTAU (CPU_FTR_COMMON | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \
+ CPU_FTR_ALTIVEC_COMP | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
#define CPU_FTRS_7400 (CPU_FTR_COMMON | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
- CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \
+ CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
#define CPU_FTRS_7450_20 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7450_21 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7450_23 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
#define CPU_FTRS_7455_1 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS | \
- CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
+ CPU_FTR_SPEC7450 | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
#define CPU_FTRS_7455_20 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
- CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
#define CPU_FTRS_7455 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7447_10 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE | \
CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7447 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7447A (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7448 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_82XX (CPU_FTR_COMMON | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB)
#define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
- CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS)
+ CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP)
#define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \
- CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \
CPU_FTR_COMMON)
#define CPU_FTRS_E300C2 (CPU_FTR_MAYBE_CAN_DOZE | \
- CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \
CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE)
-#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | \
- CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE)
+#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | CPU_FTR_USE_TB)
#define CPU_FTRS_8XX (CPU_FTR_USE_TB)
-#define CPU_FTRS_40X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN)
-#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN)
+#define CPU_FTRS_40X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_440x6 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_INDEXED_DCR)
#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
- CPU_FTR_UNIFIED_ID_CACHE)
+ CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
- CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN)
+ CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_NOEXECUTE)
#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
- CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | \
- CPU_FTR_NODSISRALIGN)
+ CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
+ CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN | \
- CPU_FTR_L2CSR | CPU_FTR_LWSYNC)
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
#define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE)
+ CPU_FTR_IABR | CPU_FTR_PPC_LE)
#define CPU_FTRS_RS64 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \
+ CPU_FTR_IABR | \
CPU_FTR_MMCRA | CPU_FTR_CTRL)
#define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ)
#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
CPU_FTR_CP_USE_DCBTZ)
#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
CPU_FTR_PURR)
#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR)
+ CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD)
#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_SAO)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | \
- CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ)
+ CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
+ CPU_FTR_UNALIGNED_LD_STD)
#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
+ CPU_FTR_PPCAS_ARCH_V2 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B)
-#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
+#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
#ifdef __powerpc64__
#define CPU_FTRS_POSSIBLE \
@@ -452,7 +447,7 @@ enum {
CPU_FTRS_40X |
#endif
#ifdef CONFIG_44x
- CPU_FTRS_44X |
+ CPU_FTRS_44X | CPU_FTRS_440x6 |
#endif
#ifdef CONFIG_E200
CPU_FTRS_E200 |
@@ -492,7 +487,7 @@ enum {
CPU_FTRS_40X &
#endif
#ifdef CONFIG_44x
- CPU_FTRS_44X &
+ CPU_FTRS_44X & CPU_FTRS_440x6 &
#endif
#ifdef CONFIG_E200
CPU_FTRS_E200 &
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
index 72d2b72c7390..7d2e6235726d 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -23,6 +23,7 @@
#ifndef __ASSEMBLY__
#include <linux/spinlock.h>
+#include <asm/cputable.h>
typedef struct {
unsigned int base;
@@ -39,23 +40,45 @@ static inline bool dcr_map_ok_native(dcr_host_native_t host)
#define dcr_read_native(host, dcr_n) mfdcr(dcr_n + host.base)
#define dcr_write_native(host, dcr_n, value) mtdcr(dcr_n + host.base, value)
-/* Device Control Registers */
-void __mtdcr(int reg, unsigned int val);
-unsigned int __mfdcr(int reg);
+/* Table based DCR accessors */
+extern void __mtdcr(unsigned int reg, unsigned int val);
+extern unsigned int __mfdcr(unsigned int reg);
+
+/* mfdcrx/mtdcrx instruction-based accessors. We hand-code
+ * the opcodes in order not to depend on newer binutils
+ */
+static inline unsigned int mfdcrx(unsigned int reg)
+{
+ unsigned int ret;
+ asm volatile(".long 0x7c000206 | (%0 << 21) | (%1 << 16)"
+ : "=r" (ret) : "r" (reg));
+ return ret;
+}
+
+static inline void mtdcrx(unsigned int reg, unsigned int val)
+{
+ asm volatile(".long 0x7c000306 | (%0 << 21) | (%1 << 16)"
+ : : "r" (val), "r" (reg));
+}
+
#define mfdcr(rn) \
({unsigned int rval; \
- if (__builtin_constant_p(rn)) \
+ if (__builtin_constant_p(rn) && rn < 1024) \
asm volatile("mfdcr %0," __stringify(rn) \
: "=r" (rval)); \
+ else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
+ rval = mfdcrx(rn); \
else \
rval = __mfdcr(rn); \
rval;})
#define mtdcr(rn, v) \
do { \
- if (__builtin_constant_p(rn)) \
+ if (__builtin_constant_p(rn) && rn < 1024) \
asm volatile("mtdcr " __stringify(rn) ",%0" \
: : "r" (v)); \
+ else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
+ mtdcrx(rn, v); \
else \
__mtdcr(rn, v); \
} while (0)
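For readers decoding the hand-coded .long constants above, here is a small stand-alone sketch (illustrative only, not part of the patch) that rebuilds the mfdcrx/mtdcrx instruction words from the same formula, so the operand placement in the << 21 and << 16 slots can be checked on any host:

    #include <stdio.h>
    #include <stdint.h>

    /* mfdcrx: base opcode 0x7c000206, destination GPR in the << 21 slot,
     *         GPR holding the DCR number in the << 16 slot.
     * mtdcrx: base opcode 0x7c000306, source GPR in the << 21 slot,
     *         GPR holding the DCR number in the << 16 slot.
     */
    static uint32_t mfdcrx_word(unsigned int rt, unsigned int ra)
    {
        return 0x7c000206u | (rt << 21) | (ra << 16);
    }

    static uint32_t mtdcrx_word(unsigned int rs, unsigned int ra)
    {
        return 0x7c000306u | (rs << 21) | (ra << 16);
    }

    int main(void)
    {
        printf("mfdcrx r3,r4 -> 0x%08x\n", mfdcrx_word(3, 4));
        printf("mtdcrx r5,r6 -> 0x%08x\n", mtdcrx_word(5, 6));
        return 0;
    }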
@@ -69,8 +92,13 @@ static inline unsigned __mfdcri(int base_addr, int base_data, int reg)
unsigned int val;
spin_lock_irqsave(&dcr_ind_lock, flags);
- __mtdcr(base_addr, reg);
- val = __mfdcr(base_data);
+ if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+ mtdcrx(base_addr, reg);
+ val = mfdcrx(base_data);
+ } else {
+ __mtdcr(base_addr, reg);
+ val = __mfdcr(base_data);
+ }
spin_unlock_irqrestore(&dcr_ind_lock, flags);
return val;
}
@@ -81,8 +109,13 @@ static inline void __mtdcri(int base_addr, int base_data, int reg,
unsigned long flags;
spin_lock_irqsave(&dcr_ind_lock, flags);
- __mtdcr(base_addr, reg);
- __mtdcr(base_data, val);
+ if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+ mtdcrx(base_addr, reg);
+ mtdcrx(base_data, val);
+ } else {
+ __mtdcr(base_addr, reg);
+ __mtdcr(base_data, val);
+ }
spin_unlock_irqrestore(&dcr_ind_lock, flags);
}
@@ -93,9 +126,15 @@ static inline void __dcri_clrset(int base_addr, int base_data, int reg,
unsigned int val;
spin_lock_irqsave(&dcr_ind_lock, flags);
- __mtdcr(base_addr, reg);
- val = (__mfdcr(base_data) & ~clr) | set;
- __mtdcr(base_data, val);
+ if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+ mtdcrx(base_addr, reg);
+ val = (mfdcrx(base_data) & ~clr) | set;
+ mtdcrx(base_data, val);
+ } else {
+ __mtdcr(base_addr, reg);
+ val = (__mfdcr(base_data) & ~clr) | set;
+ __mtdcr(base_data, val);
+ }
spin_unlock_irqrestore(&dcr_ind_lock, flags);
}
diff --git a/arch/powerpc/include/asm/dcr.h b/arch/powerpc/include/asm/dcr.h
index d13fb68bb5c0..9d6851cfb841 100644
--- a/arch/powerpc/include/asm/dcr.h
+++ b/arch/powerpc/include/asm/dcr.h
@@ -68,9 +68,9 @@ typedef dcr_host_mmio_t dcr_host_t;
 * additional helpers to read the DCR base from the device-tree
*/
struct device_node;
-extern unsigned int dcr_resource_start(struct device_node *np,
+extern unsigned int dcr_resource_start(const struct device_node *np,
unsigned int index);
-extern unsigned int dcr_resource_len(struct device_node *np,
+extern unsigned int dcr_resource_len(const struct device_node *np,
unsigned int index);
#endif /* CONFIG_PPC_DCR */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index dfd504caccc1..7d2277cef09a 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -18,4 +18,16 @@ struct dev_archdata {
void *dma_data;
};
+static inline void dev_archdata_set_node(struct dev_archdata *ad,
+ struct device_node *np)
+{
+ ad->of_node = np;
+}
+
+static inline struct device_node *
+dev_archdata_get_node(const struct dev_archdata *ad)
+{
+ return ad->of_node;
+}
+
#endif /* _ASM_POWERPC_DEVICE_H */
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
new file mode 100644
index 000000000000..9b198d1b3b2b
--- /dev/null
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -0,0 +1,80 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __ASM_PPC_DISASSEMBLE_H__
+#define __ASM_PPC_DISASSEMBLE_H__
+
+#include <linux/types.h>
+
+static inline unsigned int get_op(u32 inst)
+{
+ return inst >> 26;
+}
+
+static inline unsigned int get_xop(u32 inst)
+{
+ return (inst >> 1) & 0x3ff;
+}
+
+static inline unsigned int get_sprn(u32 inst)
+{
+ return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_dcrn(u32 inst)
+{
+ return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_rt(u32 inst)
+{
+ return (inst >> 21) & 0x1f;
+}
+
+static inline unsigned int get_rs(u32 inst)
+{
+ return (inst >> 21) & 0x1f;
+}
+
+static inline unsigned int get_ra(u32 inst)
+{
+ return (inst >> 16) & 0x1f;
+}
+
+static inline unsigned int get_rb(u32 inst)
+{
+ return (inst >> 11) & 0x1f;
+}
+
+static inline unsigned int get_rc(u32 inst)
+{
+ return inst & 0x1;
+}
+
+static inline unsigned int get_ws(u32 inst)
+{
+ return (inst >> 11) & 0x1f;
+}
+
+static inline unsigned int get_d(u32 inst)
+{
+ return inst & 0xffff;
+}
+
+#endif /* __ASM_PPC_DISASSEMBLE_H__ */
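Since the accessors above are pure bit extraction, a short user-space sketch (not part of the patch) decoding a known instruction word shows how they fit together; 0x38640010 is addi r3,r4,16:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t u32;

    /* same shifts and masks as the header above */
    static unsigned int get_op(u32 inst) { return inst >> 26; }
    static unsigned int get_rt(u32 inst) { return (inst >> 21) & 0x1f; }
    static unsigned int get_ra(u32 inst) { return (inst >> 16) & 0x1f; }
    static unsigned int get_d(u32 inst)  { return inst & 0xffff; }

    int main(void)
    {
        u32 inst = 0x38640010;  /* addi r3,r4,16 */

        /* expected: op=14 (addi), rt=3, ra=4, d=16 */
        printf("op=%u rt=%u ra=%u d=%u\n",
               get_op(inst), get_rt(inst), get_ra(inst), get_d(inst));
        return 0;
    }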
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index fddb229bd74f..86cef7ddc8d5 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -60,12 +60,6 @@ struct dma_mapping_ops {
dma_addr_t *dma_handle, gfp_t flag);
void (*free_coherent)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
- dma_addr_t (*map_single)(struct device *dev, void *ptr,
- size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs);
- void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs);
int (*map_sg)(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
struct dma_attrs *attrs);
@@ -82,6 +76,22 @@ struct dma_mapping_ops {
dma_addr_t dma_address, size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs);
+#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
+ void (*sync_single_range_for_cpu)(struct device *hwdev,
+ dma_addr_t dma_handle, unsigned long offset,
+ size_t size,
+ enum dma_data_direction direction);
+ void (*sync_single_range_for_device)(struct device *hwdev,
+ dma_addr_t dma_handle, unsigned long offset,
+ size_t size,
+ enum dma_data_direction direction);
+ void (*sync_sg_for_cpu)(struct device *hwdev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction);
+ void (*sync_sg_for_device)(struct device *hwdev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction);
+#endif
};
/*
@@ -149,10 +159,9 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
}
/*
- * TODO: map_/unmap_single will ideally go away, to be completely
- * replaced by map/unmap_page. Until then, we allow dma_ops to have
- * one or the other, or both by checking to see if the specific
- * function requested exists; and if not, falling back on the other set.
+ * map_/unmap_single actually call through to map/unmap_page now that all the
+ * dma_mapping_ops have been converted over. We just have to get the page and
+ * offset to pass through to map_page.
*/
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
void *cpu_addr,
@@ -164,10 +173,6 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
BUG_ON(!dma_ops);
- if (dma_ops->map_single)
- return dma_ops->map_single(dev, cpu_addr, size, direction,
- attrs);
-
return dma_ops->map_page(dev, virt_to_page(cpu_addr),
(unsigned long)cpu_addr % PAGE_SIZE, size,
direction, attrs);
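The page/offset split described in the comment above is plain pointer arithmetic; a quick host-side illustration (4K pages assumed, with the page frame number standing in for what virt_to_page() would return):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)

    int main(void)
    {
        uintptr_t cpu_addr = 0xc1234abc;

        /* the same decomposition dma_map_single_attrs() feeds into ->map_page() */
        unsigned long pfn    = cpu_addr >> PAGE_SHIFT;  /* stand-in for virt_to_page() */
        unsigned long offset = cpu_addr % PAGE_SIZE;

        printf("pfn=0x%lx offset=0x%lx\n", pfn, offset);
        return 0;
    }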
@@ -183,11 +188,6 @@ static inline void dma_unmap_single_attrs(struct device *dev,
BUG_ON(!dma_ops);
- if (dma_ops->unmap_single) {
- dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
- return;
- }
-
dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
}
@@ -201,12 +201,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
BUG_ON(!dma_ops);
- if (dma_ops->map_page)
- return dma_ops->map_page(dev, page, offset, size, direction,
- attrs);
-
- return dma_ops->map_single(dev, page_address(page) + offset, size,
- direction, attrs);
+ return dma_ops->map_page(dev, page, offset, size, direction, attrs);
}
static inline void dma_unmap_page_attrs(struct device *dev,
@@ -219,12 +214,7 @@ static inline void dma_unmap_page_attrs(struct device *dev,
BUG_ON(!dma_ops);
- if (dma_ops->unmap_page) {
- dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
- return;
- }
-
- dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
+ dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
@@ -308,47 +298,107 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}
+#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
static inline void dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
- BUG_ON(direction == DMA_NONE);
- __dma_sync(bus_to_virt(dma_handle), size, direction);
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+ dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+ size, direction);
}
static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
- BUG_ON(direction == DMA_NONE);
- __dma_sync(bus_to_virt(dma_handle), size, direction);
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+ dma_ops->sync_single_range_for_device(dev, dma_handle,
+ 0, size, direction);
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
- struct scatterlist *sg;
- int i;
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
- BUG_ON(direction == DMA_NONE);
+ BUG_ON(!dma_ops);
+ dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nents,
+ enum dma_data_direction direction)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+ dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
- for_each_sg(sgl, sg, nents, i)
- __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+ BUG_ON(!dma_ops);
+ dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+ offset, size, direction);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+ dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+ size, direction);
+}
+#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
+static inline void dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents,
+ enum dma_data_direction direction)
+{
}
static inline void dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
- struct scatterlist *sg;
- int i;
+}
- BUG_ON(direction == DMA_NONE);
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+}
- for_each_sg(sgl, sg, nents, i)
- __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
}
+#endif
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
@@ -382,22 +432,6 @@ static inline int dma_get_cache_alignment(void)
#endif
}
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- /* just sync everything for now */
- dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- /* just sync everything for now */
- dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
-}
-
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index b886bec67016..66ea9b8b95c5 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -17,8 +17,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef _PPC64_EEH_H
-#define _PPC64_EEH_H
+#ifndef _POWERPC_EEH_H
+#define _POWERPC_EEH_H
#ifdef __KERNEL__
#include <linux/init.h>
@@ -110,6 +110,7 @@ static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
#define EEH_IO_ERROR_VALUE(size) (-1UL)
#endif /* CONFIG_EEH */
+#ifdef CONFIG_PPC64
/*
* MMIO read/write operations with EEH support.
*/
@@ -207,5 +208,6 @@ static inline void eeh_readsl(const volatile void __iomem *addr, void * buf,
eeh_check_failure(addr, *(u32*)buf);
}
+#endif /* CONFIG_PPC64 */
#endif /* __KERNEL__ */
-#endif /* _PPC64_EEH_H */
+#endif /* _POWERPC_EEH_H */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index d812929390e4..cd46f023ec6d 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -267,7 +267,7 @@ extern int ucache_bsize;
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
- int executable_stack);
+ int uses_interp);
#define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b);
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index a1029967620b..e4094a5cb05b 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -81,6 +81,36 @@ label##5: \
#define ALT_FTR_SECTION_END_IFCLR(msk) \
ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
+/* MMU feature dependent sections */
+#define BEGIN_MMU_FTR_SECTION_NESTED(label) START_FTR_SECTION(label)
+#define BEGIN_MMU_FTR_SECTION START_FTR_SECTION(97)
+
+#define END_MMU_FTR_SECTION_NESTED(msk, val, label) \
+ FTR_SECTION_ELSE_NESTED(label) \
+ MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup)
+
+#define END_MMU_FTR_SECTION(msk, val) \
+ END_MMU_FTR_SECTION_NESTED(msk, val, 97)
+
+#define END_MMU_FTR_SECTION_IFSET(msk) END_MMU_FTR_SECTION((msk), (msk))
+#define END_MMU_FTR_SECTION_IFCLR(msk) END_MMU_FTR_SECTION((msk), 0)
+
+/* MMU feature sections with alternatives, use BEGIN_FTR_SECTION to start */
+#define MMU_FTR_SECTION_ELSE_NESTED(label) FTR_SECTION_ELSE_NESTED(label)
+#define MMU_FTR_SECTION_ELSE MMU_FTR_SECTION_ELSE_NESTED(97)
+#define ALT_MMU_FTR_SECTION_END_NESTED(msk, val, label) \
+ MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup)
+#define ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, label) \
+ ALT_MMU_FTR_SECTION_END_NESTED(msk, msk, label)
+#define ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, label) \
+ ALT_MMU_FTR_SECTION_END_NESTED(msk, 0, label)
+#define ALT_MMU_FTR_SECTION_END(msk, val) \
+ ALT_MMU_FTR_SECTION_END_NESTED(msk, val, 97)
+#define ALT_MMU_FTR_SECTION_END_IFSET(msk) \
+ ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, 97)
+#define ALT_MMU_FTR_SECTION_END_IFCLR(msk) \
+ ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
+
/* Firmware feature dependent sections */
#define BEGIN_FW_FTR_SECTION_NESTED(label) START_FTR_SECTION(label)
#define BEGIN_FW_FTR_SECTION START_FTR_SECTION(97)
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index b298f7a631e6..e5f2ae8362f7 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -7,7 +7,19 @@
#ifndef __ASSEMBLY__
extern void _mcount(void);
-#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ /* relocation of the mcount call site is the same as the address */
+ return addr;
+}
+
+struct dyn_arch_ftrace {
+ struct module *mod;
+};
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 91c589520c0a..04e4a620952e 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -38,9 +38,24 @@ extern pte_t *pkmap_page_table;
* easily, subsequent pte tables have to be allocated in one physical
* chunk of RAM.
*/
-#define LAST_PKMAP (1 << PTE_SHIFT)
-#define LAST_PKMAP_MASK (LAST_PKMAP-1)
+/*
+ * We use one full pte table with 4K pages. With 16K/64K pages the pte
+ * table covers enough memory (32MB and 512MB respectively) that both
+ * FIXMAP and PKMAP can be placed in a single pte table. We use 1024
+ * pages for PKMAP with 16K/64K pages.
+ */
+#ifdef CONFIG_PPC_4K_PAGES
+#define PKMAP_ORDER PTE_SHIFT
+#else
+#define PKMAP_ORDER 10
+#endif
+#define LAST_PKMAP (1 << PKMAP_ORDER)
+#ifndef CONFIG_PPC_4K_PAGES
+#define PKMAP_BASE (FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1))
+#else
#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
+#endif
+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
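To make the sizing comment above concrete, a small host-side sketch (a minimal model assuming 4-byte PTEs on the 4K configuration, i.e. PTE_SHIFT = PAGE_SHIFT - 2; purely illustrative):

    #include <stdio.h>

    static void pkmap_report(unsigned int page_shift, int is_4k_pages)
    {
        /* mirror the header: a full pte table of slots for 4K pages, 1024 otherwise */
        unsigned int pte_shift   = page_shift - 2;   /* 4-byte PTEs assumed */
        unsigned int pkmap_order = is_4k_pages ? pte_shift : 10;
        unsigned long last_pkmap = 1UL << pkmap_order;

        printf("%2uK pages: LAST_PKMAP=%lu, PKMAP window=%lu KB\n",
               1u << (page_shift - 10), last_pkmap,
               (last_pkmap << page_shift) >> 10);
    }

    int main(void)
    {
        pkmap_report(12, 1);   /* 4K pages: 1024 slots, 4 MB of pkmap space */
        pkmap_report(14, 0);   /* 16K pages: 1024 slots, 16 MB */
        pkmap_report(16, 0);   /* 64K pages: 1024 slots, 64 MB */
        return 0;
    }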
@@ -85,7 +100,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro
BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
- flush_tlb_page(NULL, vaddr);
+ local_flush_tlb_page(NULL, vaddr);
return (void*) vaddr;
}
@@ -113,7 +128,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
* this pte without first remap it
*/
pte_clear(&init_mm, vaddr, kmap_pte-idx);
- flush_tlb_page(NULL, vaddr);
+ local_flush_tlb_page(NULL, vaddr);
#endif
pagefault_enable();
}
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 26f0d0ab27a5..b1dafb6a9743 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -18,6 +18,12 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep);
/*
+ * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
+ * to override the version in mm/hugetlb.c
+ */
+#define vma_mmu_pagesize vma_mmu_pagesize
+
+/*
* If the arch doesn't supply something else, assume that hugepage
* size aligned regions are ok without further preparation.
*/
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 08266d2728b3..494cd8b0a278 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -713,13 +713,6 @@ static inline void * phys_to_virt(unsigned long address)
*/
#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-/* We do NOT want virtual merging, it would put too much pressure on
- * our iommu allocator. Instead, we want drivers to be smart enough
- * to coalesce sglists that happen to have been mapped in a contiguous
- * way by the iommu
- */
-#define BIO_VMERGE_BOUNDARY 0
-
/*
 * 32 bits still uses virt_to_bus() for its implementation of DMA
 * mappings so we have to keep it defined here. We also have some old
diff --git a/arch/powerpc/include/asm/ioctls.h b/arch/powerpc/include/asm/ioctls.h
index 279a6229584b..1842186d872c 100644
--- a/arch/powerpc/include/asm/ioctls.h
+++ b/arch/powerpc/include/asm/ioctls.h
@@ -89,6 +89,8 @@
#define TIOCSBRK 0x5427 /* BSD compatibility */
#define TIOCCBRK 0x5428 /* BSD compatibility */
#define TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TIOCGRS485 0x542e
+#define TIOCSRS485 0x542f
#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h
index b07ebb9784d3..5ebfe5d3c61f 100644
--- a/arch/powerpc/include/asm/kdump.h
+++ b/arch/powerpc/include/asm/kdump.h
@@ -1,6 +1,8 @@
#ifndef _PPC64_KDUMP_H
#define _PPC64_KDUMP_H
+#include <asm/page.h>
+
/* Kdump kernel runs at 32 MB, change at your peril. */
#define KDUMP_KERNELBASE 0x2000000
@@ -11,8 +13,19 @@
#ifdef CONFIG_CRASH_DUMP
+/*
+ * On PPC64 translation is disabled during trampoline setup, so we use
+ * physical addresses. On PPC32, however, translation is already enabled,
+ * so we can't do the same. Luckily create_trampoline() creates relative
+ * branches, so we can just add the PAGE_OFFSET and not worry about it.
+ */
+#ifdef __powerpc64__
#define KDUMP_TRAMPOLINE_START 0x0100
#define KDUMP_TRAMPOLINE_END 0x3000
+#else
+#define KDUMP_TRAMPOLINE_START (0x0100 + PAGE_OFFSET)
+#define KDUMP_TRAMPOLINE_END (0x3000 + PAGE_OFFSET)
+#endif /* __powerpc64__ */
#define KDUMP_MIN_TCE_ENTRIES 2048
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 3736d9b33289..7e06b43720d3 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -33,12 +33,12 @@
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
+#include <asm/reg.h>
typedef void (*crash_shutdown_t)(void);
#ifdef CONFIG_KEXEC
-#ifdef __powerpc64__
/*
* This function is responsible for capturing register states if coming
* via panic or invoking dump using sysrq-trigger.
@@ -48,67 +48,9 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
{
if (oldregs)
memcpy(newregs, oldregs, sizeof(*newregs));
- else {
- /* FIXME Merge this with xmon_save_regs ?? */
- unsigned long tmp1, tmp2;
- __asm__ __volatile__ (
- "std 0,0(%2)\n"
- "std 1,8(%2)\n"
- "std 2,16(%2)\n"
- "std 3,24(%2)\n"
- "std 4,32(%2)\n"
- "std 5,40(%2)\n"
- "std 6,48(%2)\n"
- "std 7,56(%2)\n"
- "std 8,64(%2)\n"
- "std 9,72(%2)\n"
- "std 10,80(%2)\n"
- "std 11,88(%2)\n"
- "std 12,96(%2)\n"
- "std 13,104(%2)\n"
- "std 14,112(%2)\n"
- "std 15,120(%2)\n"
- "std 16,128(%2)\n"
- "std 17,136(%2)\n"
- "std 18,144(%2)\n"
- "std 19,152(%2)\n"
- "std 20,160(%2)\n"
- "std 21,168(%2)\n"
- "std 22,176(%2)\n"
- "std 23,184(%2)\n"
- "std 24,192(%2)\n"
- "std 25,200(%2)\n"
- "std 26,208(%2)\n"
- "std 27,216(%2)\n"
- "std 28,224(%2)\n"
- "std 29,232(%2)\n"
- "std 30,240(%2)\n"
- "std 31,248(%2)\n"
- "mfmsr %0\n"
- "std %0, 264(%2)\n"
- "mfctr %0\n"
- "std %0, 280(%2)\n"
- "mflr %0\n"
- "std %0, 288(%2)\n"
- "bl 1f\n"
- "1: mflr %1\n"
- "std %1, 256(%2)\n"
- "mtlr %0\n"
- "mfxer %0\n"
- "std %0, 296(%2)\n"
- : "=&r" (tmp1), "=&r" (tmp2)
- : "b" (newregs)
- : "memory");
- }
+ else
+ ppc_save_regs(newregs);
}
-#else
-/*
- * Provide a dummy definition to avoid build failures. Will remain
- * empty till crash dump support is enabled.
- */
-static inline void crash_setup_regs(struct pt_regs *newregs,
- struct pt_regs *oldregs) { }
-#endif /* !__powerpc64 __ */
extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
master to copy new code to 0 */
diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h
new file mode 100644
index 000000000000..f49031b632ca
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_44x.h
@@ -0,0 +1,61 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __ASM_44X_H__
+#define __ASM_44X_H__
+
+#include <linux/kvm_host.h>
+
+#define PPC44x_TLB_SIZE 64
+
+/* If the guest is expecting it, this can be as large as we like; we'd just
+ * need to find some way of advertising it. */
+#define KVM44x_GUEST_TLB_SIZE 64
+
+struct kvmppc_44x_shadow_ref {
+ struct page *page;
+ u16 gtlb_index;
+ u8 writeable;
+ u8 tid;
+};
+
+struct kvmppc_vcpu_44x {
+ /* Unmodified copy of the guest's TLB. */
+ struct kvmppc_44x_tlbe guest_tlb[KVM44x_GUEST_TLB_SIZE];
+
+ /* References to guest pages in the hardware TLB. */
+ struct kvmppc_44x_shadow_ref shadow_refs[PPC44x_TLB_SIZE];
+
+ /* State of the shadow TLB at guest context switch time. */
+ struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
+ u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
+
+ struct kvm_vcpu vcpu;
+};
+
+static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu)
+{
+ return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu);
+}
+
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid);
+void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu);
+void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu);
+
+#endif /* __ASM_44X_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 34b52b7180cd..c1e436fe7738 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -64,27 +64,58 @@ struct kvm_vcpu_stat {
u32 halt_wakeup;
};
-struct tlbe {
+struct kvmppc_44x_tlbe {
u32 tid; /* Only the low 8 bits are used. */
u32 word0;
u32 word1;
u32 word2;
};
-struct kvm_arch {
+enum kvm_exit_types {
+ MMIO_EXITS,
+ DCR_EXITS,
+ SIGNAL_EXITS,
+ ITLB_REAL_MISS_EXITS,
+ ITLB_VIRT_MISS_EXITS,
+ DTLB_REAL_MISS_EXITS,
+ DTLB_VIRT_MISS_EXITS,
+ SYSCALL_EXITS,
+ ISI_EXITS,
+ DSI_EXITS,
+ EMULATED_INST_EXITS,
+ EMULATED_MTMSRWE_EXITS,
+ EMULATED_WRTEE_EXITS,
+ EMULATED_MTSPR_EXITS,
+ EMULATED_MFSPR_EXITS,
+ EMULATED_MTMSR_EXITS,
+ EMULATED_MFMSR_EXITS,
+ EMULATED_TLBSX_EXITS,
+ EMULATED_TLBWE_EXITS,
+ EMULATED_RFI_EXITS,
+ DEC_EXITS,
+ EXT_INTR_EXITS,
+ HALT_WAKEUP,
+ USR_PR_INST,
+ FP_UNAVAIL,
+ DEBUG_EXITS,
+ TIMEINGUEST,
+ __NUMBER_OF_KVM_EXIT_TYPES
};
-struct kvm_vcpu_arch {
- /* Unmodified copy of the guest's TLB. */
- struct tlbe guest_tlb[PPC44x_TLB_SIZE];
- /* TLB that's actually used when the guest is running. */
- struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
- /* Pages which are referenced in the shadow TLB. */
- struct page *shadow_pages[PPC44x_TLB_SIZE];
+/* allow access to the big-endian 32-bit upper/lower parts and the 64-bit value */
+struct kvmppc_exit_timing {
+ union {
+ u64 tv64;
+ struct {
+ u32 tbu, tbl;
+ } tv32;
+ };
+};
- /* Track which TLB entries we've modified in the current exit. */
- u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
+struct kvm_arch {
+};
+struct kvm_vcpu_arch {
u32 host_stack;
u32 host_pid;
u32 host_dbcr0;
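The kvmppc_exit_timing union above depends on the CPU being big-endian: storing tbu/tbl through tv32 is then the same as composing the 64-bit value directly. A hedged host-side sketch of that relationship (composing explicitly rather than relying on host byte order):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t tbu = 0x00000012, tbl = 0x3456789a;

        /* on big-endian PowerPC, writing tv32.tbu and tv32.tbl into the union
         * yields the same tv64 as this explicit composition */
        uint64_t tv64 = ((uint64_t)tbu << 32) | tbl;

        printf("tv64 = 0x%016llx\n", (unsigned long long)tv64);
        return 0;
    }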
@@ -94,32 +125,32 @@ struct kvm_vcpu_arch {
u32 host_msr;
u64 fpr[32];
- u32 gpr[32];
+ ulong gpr[32];
- u32 pc;
+ ulong pc;
u32 cr;
- u32 ctr;
- u32 lr;
- u32 xer;
+ ulong ctr;
+ ulong lr;
+ ulong xer;
- u32 msr;
+ ulong msr;
u32 mmucr;
- u32 sprg0;
- u32 sprg1;
- u32 sprg2;
- u32 sprg3;
- u32 sprg4;
- u32 sprg5;
- u32 sprg6;
- u32 sprg7;
- u32 srr0;
- u32 srr1;
- u32 csrr0;
- u32 csrr1;
- u32 dsrr0;
- u32 dsrr1;
- u32 dear;
- u32 esr;
+ ulong sprg0;
+ ulong sprg1;
+ ulong sprg2;
+ ulong sprg3;
+ ulong sprg4;
+ ulong sprg5;
+ ulong sprg6;
+ ulong sprg7;
+ ulong srr0;
+ ulong srr1;
+ ulong csrr0;
+ ulong csrr1;
+ ulong dsrr0;
+ ulong dsrr1;
+ ulong dear;
+ ulong esr;
u32 dec;
u32 decar;
u32 tbl;
@@ -127,7 +158,7 @@ struct kvm_vcpu_arch {
u32 tcr;
u32 tsr;
u32 ivor[16];
- u32 ivpr;
+ ulong ivpr;
u32 pir;
u32 shadow_pid;
@@ -140,9 +171,22 @@ struct kvm_vcpu_arch {
u32 dbcr0;
u32 dbcr1;
+#ifdef CONFIG_KVM_EXIT_TIMING
+ struct kvmppc_exit_timing timing_exit;
+ struct kvmppc_exit_timing timing_last_enter;
+ u32 last_exit_type;
+ u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
+ u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+ u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+ u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+ u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
+ u64 timing_last_exit;
+ struct dentry *debugfs_exit_timing;
+#endif
+
u32 last_inst;
- u32 fault_dear;
- u32 fault_esr;
+ ulong fault_dear;
+ ulong fault_esr;
gpa_t paddr_accessed;
u8 io_gpr; /* GPR used as IO source/target */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index bb62ad876de3..36d2a50a8487 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -29,11 +29,6 @@
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
-struct kvm_tlb {
- struct tlbe guest_tlb[PPC44x_TLB_SIZE];
- struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
-};
-
enum emulation_result {
EMULATE_DONE, /* no further processing */
EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */
@@ -41,9 +36,6 @@ enum emulation_result {
EMULATE_FAIL, /* can't emulate this instruction */
};
-extern const unsigned char exception_priority[];
-extern const unsigned char priority_exception[];
-
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern char kvmppc_handlers_start[];
extern unsigned long kvmppc_handler_len;
@@ -58,51 +50,44 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
extern int kvmppc_emulate_instruction(struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
-extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
- u64 asid, u32 flags);
-extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
- gva_t eend, u32 asid);
+extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
+ u64 asid, u32 flags, u32 max_bytes,
+ unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
-/* XXX Book E specific */
-extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i);
-
-extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);
-
-static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception)
-{
- unsigned int priority = exception_priority[exception];
- set_bit(priority, &vcpu->arch.pending_exceptions);
-}
-
-static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception)
-{
- unsigned int priority = exception_priority[exception];
- clear_bit(priority, &vcpu->arch.pending_exceptions);
-}
-
-/* Helper function for "full" MSR writes. No need to call this if only EE is
- * changing. */
-static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
-{
- if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
- kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
-
- vcpu->arch.msr = new_msr;
-
- if (vcpu->arch.msr & MSR_WE)
- kvm_vcpu_block(vcpu);
-}
-
-static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
-{
- if (vcpu->arch.pid != new_pid) {
- vcpu->arch.pid = new_pid;
- vcpu->arch.swap_pid = 1;
- }
-}
+/* Core-specific hooks */
+
+extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
+ unsigned int id);
+extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_check_processor_compat(void);
+extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr);
+
+extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
+
+extern void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu);
+
+extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq);
+
+extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int op, int *advance);
+extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
+extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
+
+extern int kvmppc_booke_init(void);
+extern void kvmppc_booke_exit(void);
extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index 612d83276653..84b457a3c1bc 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -67,7 +67,7 @@ static __inline__ long local_inc_return(local_t *l)
bne- 1b"
: "=&r" (t)
: "r" (&(l->a.counter))
- : "cc", "memory");
+ : "cc", "xer", "memory");
return t;
}
@@ -94,7 +94,7 @@ static __inline__ long local_dec_return(local_t *l)
bne- 1b"
: "=&r" (t)
: "r" (&(l->a.counter))
- : "cc", "memory");
+ : "cc", "xer", "memory");
return t;
}
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 2fe268b10333..25aaa97facd8 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -133,7 +133,8 @@ struct lppaca {
//=============================================================================
// CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data
//=============================================================================
- u8 pmc_save_area[256]; // PMC interrupt Area x00-xFF
+ u32 page_ins; // CMO Hint - # page ins by OS x00-x04
+ u8 pmc_save_area[252]; // PMC interrupt Area x04-xFF
} __attribute__((__aligned__(0x400)));
extern struct lppaca lppaca[];
diff --git a/arch/powerpc/include/asm/mmu-40x.h b/arch/powerpc/include/asm/mmu-40x.h
index 3d108676584c..776f415a36aa 100644
--- a/arch/powerpc/include/asm/mmu-40x.h
+++ b/arch/powerpc/include/asm/mmu-40x.h
@@ -54,8 +54,9 @@
#ifndef __ASSEMBLY__
typedef struct {
- unsigned long id;
- unsigned long vdso_base;
+ unsigned int id;
+ unsigned int active;
+ unsigned long vdso_base;
} mm_context_t;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index a825524c981a..27cc6fdcd3b7 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -4,6 +4,8 @@
* PPC440 support
*/
+#include <asm/page.h>
+
#define PPC44x_MMUCR_TID 0x000000ff
#define PPC44x_MMUCR_STS 0x00010000
@@ -54,10 +56,12 @@
#ifndef __ASSEMBLY__
extern unsigned int tlb_44x_hwater;
+extern unsigned int tlb_44x_index;
typedef struct {
- unsigned long id;
- unsigned long vdso_base;
+ unsigned int id;
+ unsigned int active;
+ unsigned long vdso_base;
} mm_context_t;
#endif /* !__ASSEMBLY__ */
@@ -73,4 +77,19 @@ typedef struct {
/* Size of the TLBs used for pinning in lowmem */
#define PPC_PIN_SIZE (1 << 28) /* 256M */
+#if (PAGE_SHIFT == 12)
+#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
+#elif (PAGE_SHIFT == 14)
+#define PPC44x_TLBE_SIZE PPC44x_TLB_16K
+#elif (PAGE_SHIFT == 16)
+#define PPC44x_TLBE_SIZE PPC44x_TLB_64K
+#else
+#error "Unsupported PAGE_SIZE"
+#endif
+
+#define PPC44x_PGD_OFF_SHIFT (32 - PGDIR_SHIFT + PGD_T_LOG2)
+#define PPC44x_PGD_OFF_MASK_BIT (PGDIR_SHIFT - PGD_T_LOG2)
+#define PPC44x_PTE_ADD_SHIFT (32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2)
+#define PPC44x_PTE_ADD_MASK_BIT (32 - PTE_T_LOG2 - PTE_SHIFT)
+
#endif /* _ASM_POWERPC_MMU_44X_H_ */
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 9db877eb88db..07865a357848 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -137,7 +137,8 @@
#ifndef __ASSEMBLY__
typedef struct {
- unsigned long id;
+ unsigned int id;
+ unsigned int active;
unsigned long vdso_base;
} mm_context_t;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/mmu-fsl-booke.h b/arch/powerpc/include/asm/mmu-fsl-booke.h
index 925d93cf64d8..3f941c0f7e8e 100644
--- a/arch/powerpc/include/asm/mmu-fsl-booke.h
+++ b/arch/powerpc/include/asm/mmu-fsl-booke.h
@@ -40,6 +40,8 @@
#define MAS2_M 0x00000004
#define MAS2_G 0x00000002
#define MAS2_E 0x00000001
+#define MAS2_EPN_MASK(size) (~0 << (2*(size) + 10))
+#define MAS2_VAL(addr, size, flags) ((addr) & MAS2_EPN_MASK(size) | (flags))
#define MAS3_RPN 0xFFFFF000
#define MAS3_U0 0x00000200
@@ -74,8 +76,9 @@
#ifndef __ASSEMBLY__
typedef struct {
- unsigned long id;
- unsigned long vdso_base;
+ unsigned int id;
+ unsigned int active;
+ unsigned long vdso_base;
} mm_context_t;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 4c0e1b4f975c..6e7639911318 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -2,6 +2,63 @@
#define _ASM_POWERPC_MMU_H_
#ifdef __KERNEL__
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
+/*
+ * MMU features bit definitions
+ */
+
+/*
+ * First half is MMU families
+ */
+#define MMU_FTR_HPTE_TABLE ASM_CONST(0x00000001)
+#define MMU_FTR_TYPE_8xx ASM_CONST(0x00000002)
+#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004)
+#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008)
+#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010)
+
+/*
+ * These are individual features
+ */
+
+/* Enable use of high BAT registers */
+#define MMU_FTR_USE_HIGH_BATS ASM_CONST(0x00010000)
+
+/* Enable >32-bit physical addresses on 32-bit processor, only used
+ * by CONFIG_6xx currently as BookE supports that from day 1
+ */
+#define MMU_FTR_BIG_PHYS ASM_CONST(0x00020000)
+
+/* Enable use of broadcast TLB invalidations. We don't always set it
+ * on processors that support it due to other constraints with the
+ * use of such invalidations
+ */
+#define MMU_FTR_USE_TLBIVAX_BCAST ASM_CONST(0x00040000)
+
+/* Enable use of tlbilx invalidate-by-PID variant.
+ */
+#define MMU_FTR_USE_TLBILX_PID ASM_CONST(0x00080000)
+
+/* This indicates that the processor cannot handle multiple outstanding
+ * broadcast tlbivax or tlbsync. This makes the code use a spinlock
+ * around such invalidate forms.
+ */
+#define MMU_FTR_LOCK_BCAST_INVAL ASM_CONST(0x00100000)
+
+#ifndef __ASSEMBLY__
+#include <asm/cputable.h>
+
+static inline int mmu_has_feature(unsigned long feature)
+{
+ return (cur_cpu_spec->mmu_features & feature);
+}
+
+extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
+
+#endif /* !__ASSEMBLY__ */
+
+
#ifdef CONFIG_PPC64
/* 64-bit classic hash table MMU */
# include <asm/mmu-hash64.h>
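mmu_has_feature() above is just a mask test against cur_cpu_spec->mmu_features; a minimal host-side model of that test (the mmu_features variable is a hypothetical stand-in for the cur_cpu_spec field, the constants are the ones defined above):

    #include <stdio.h>

    #define MMU_FTR_HPTE_TABLE      0x00000001
    #define MMU_FTR_USE_HIGH_BATS   0x00010000

    /* stand-in for cur_cpu_spec->mmu_features on some hypothetical CPU */
    static unsigned long mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS;

    static int mmu_has_feature(unsigned long feature)
    {
        return (mmu_features & feature) != 0;
    }

    int main(void)
    {
        printf("high BATs: %d\n", mmu_has_feature(MMU_FTR_USE_HIGH_BATS));
        printf("hash MMU:  %d\n", mmu_has_feature(MMU_FTR_HPTE_TABLE));
        return 0;
    }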
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 6b993ef452ff..ab4f19263c42 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -2,237 +2,26 @@
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm-generic/mm_hooks.h>
-
-#ifndef CONFIG_PPC64
-#include <asm/atomic.h>
-#include <linux/bitops.h>
-
-/*
- * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
- * (virtual segment identifiers) for each context. Although the
- * hardware supports 24-bit VSIDs, and thus >1 million contexts,
- * we only use 32,768 of them. That is ample, since there can be
- * at most around 30,000 tasks in the system anyway, and it means
- * that we can use a bitmap to indicate which contexts are in use.
- * Using a bitmap means that we entirely avoid all of the problems
- * that we used to have when the context number overflowed,
- * particularly on SMP systems.
- * -- paulus.
- */
-
-/*
- * This function defines the mapping from contexts to VSIDs (virtual
- * segment IDs). We use a skew on both the context and the high 4 bits
- * of the 32-bit virtual address (the "effective segment ID") in order
- * to spread out the entries in the MMU hash table. Note, if this
- * function is changed then arch/ppc/mm/hashtable.S will have to be
- * changed to correspond.
- */
-#define CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
- & 0xffffff)
-
-/*
- The MPC8xx has only 16 contexts. We rotate through them on each
- task switch. A better way would be to keep track of tasks that
- own contexts, and implement an LRU usage. That way very active
- tasks don't always have to pay the TLB reload overhead. The
- kernel pages are mapped shared, so the kernel can run on behalf
- of any task that makes a kernel entry. Shared does not mean they
- are not protected, just that the ASID comparison is not performed.
- -- Dan
-
- The IBM4xx has 256 contexts, so we can just rotate through these
- as a way of "switching" contexts. If the TID of the TLB is zero,
- the PID/TID comparison is disabled, so we can use a TID of zero
- to represent all kernel pages as shared among all contexts.
- -- Dan
- */
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-#ifdef CONFIG_8xx
-#define NO_CONTEXT 16
-#define LAST_CONTEXT 15
-#define FIRST_CONTEXT 0
-
-#elif defined(CONFIG_4xx)
-#define NO_CONTEXT 256
-#define LAST_CONTEXT 255
-#define FIRST_CONTEXT 1
-
-#elif defined(CONFIG_E200) || defined(CONFIG_E500)
-#define NO_CONTEXT 256
-#define LAST_CONTEXT 255
-#define FIRST_CONTEXT 1
-
-#else
-
-/* PPC 6xx, 7xx CPUs */
-#define NO_CONTEXT ((unsigned long) -1)
-#define LAST_CONTEXT 32767
-#define FIRST_CONTEXT 1
-#endif
-
-/*
- * Set the current MMU context.
- * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
- * loading up the segment registers for the user part of the address space.
- *
- * Since the PGD is immediately available, it is much faster to simply
- * pass this along as a second parameter, which is required for 8xx and
- * can be used for debugging on all processors (if you happen to have
- * an Abatron).
- */
-extern void set_context(unsigned long contextid, pgd_t *pgd);
-
-/*
- * Bitmap of contexts in use.
- * The size of this bitmap is LAST_CONTEXT + 1 bits.
- */
-extern unsigned long context_map[];
-
-/*
- * This caches the next context number that we expect to be free.
- * Its use is an optimization only, we can't rely on this context
- * number to be free, but it usually will be.
- */
-extern unsigned long next_mmu_context;
-
-/*
- * If we don't have sufficient contexts to give one to every task
- * that could be in the system, we need to be able to steal contexts.
- * These variables support that.
- */
-#if LAST_CONTEXT < 30000
-#define FEW_CONTEXTS 1
-extern atomic_t nr_free_contexts;
-extern struct mm_struct *context_mm[LAST_CONTEXT+1];
-extern void steal_context(void);
-#endif
-
-/*
- * Get a new mmu context for the address space described by `mm'.
- */
-static inline void get_mmu_context(struct mm_struct *mm)
-{
- unsigned long ctx;
-
- if (mm->context.id != NO_CONTEXT)
- return;
-#ifdef FEW_CONTEXTS
- while (atomic_dec_if_positive(&nr_free_contexts) < 0)
- steal_context();
-#endif
- ctx = next_mmu_context;
- while (test_and_set_bit(ctx, context_map)) {
- ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
- if (ctx > LAST_CONTEXT)
- ctx = 0;
- }
- next_mmu_context = (ctx + 1) & LAST_CONTEXT;
- mm->context.id = ctx;
-#ifdef FEW_CONTEXTS
- context_mm[ctx] = mm;
-#endif
-}
-
-/*
- * Set up the context for a new address space.
- */
-static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
-{
- mm->context.id = NO_CONTEXT;
- return 0;
-}
-
-/*
- * We're finished using the context for an address space.
- */
-static inline void destroy_context(struct mm_struct *mm)
-{
- preempt_disable();
- if (mm->context.id != NO_CONTEXT) {
- clear_bit(mm->context.id, context_map);
- mm->context.id = NO_CONTEXT;
-#ifdef FEW_CONTEXTS
- atomic_inc(&nr_free_contexts);
-#endif
- }
- preempt_enable();
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
-{
-#ifdef CONFIG_ALTIVEC
- if (cpu_has_feature(CPU_FTR_ALTIVEC))
- asm volatile ("dssall;\n"
-#ifndef CONFIG_POWER4
- "sync;\n" /* G4 needs a sync here, G5 apparently not */
-#endif
- : : );
-#endif /* CONFIG_ALTIVEC */
-
- tsk->thread.pgdir = next->pgd;
-
- /* No need to flush userspace segments if the mm doesnt change */
- if (prev == next)
- return;
-
- /* Setup new userspace context */
- get_mmu_context(next);
- set_context(next->context.id, next->pgd);
-}
-
-#define deactivate_mm(tsk,mm) do { } while (0)
+#include <asm/cputhreads.h>
/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
+ * Most of the context management is out of line
*/
-#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, current)
-
extern void mmu_context_init(void);
-
-
-#else
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-
-/*
- * Copyright (C) 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-static inline void enter_lazy_tlb(struct mm_struct *mm,
- struct task_struct *tsk)
-{
-}
-
-/*
- * The proto-VSID space has 2^35 - 1 segments available for user mappings.
- * Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
- * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
- */
-#define NO_CONTEXT 0
-#define MAX_CONTEXT ((1UL << 19) - 1)
-
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
+extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
+extern void set_context(unsigned long id, pgd_t *pgd);
/*
* switch_mm is the entry point called from the architecture independent
@@ -241,22 +30,39 @@ extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
- if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
- cpu_set(smp_processor_id(), next->cpu_vm_mask);
+ /* Mark that this context has been used on the new CPU */
+ cpu_set(smp_processor_id(), next->cpu_vm_mask);
+
+ /* 32-bit keeps track of the current PGDIR in the thread struct */
+#ifdef CONFIG_PPC32
+ tsk->thread.pgdir = next->pgd;
+#endif /* CONFIG_PPC32 */
- /* No need to flush userspace segments if the mm doesnt change */
+ /* Nothing else to do if we aren't actually switching */
if (prev == next)
return;
+ /* We must stop all altivec streams before changing the HW
+ * context
+ */
#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC))
asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */
+ /* The actual HW switching method differs between the various
+ * sub architectures.
+ */
+#ifdef CONFIG_PPC_STD_MMU_64
if (cpu_has_feature(CPU_FTR_SLB))
switch_slb(tsk, next);
else
switch_stab(tsk, next);
+#else
+ /* Out of line for now */
+ switch_mmu_context(prev, next);
+#endif
+
}
#define deactivate_mm(tsk,mm) do { } while (0)
@@ -274,6 +80,11 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
local_irq_restore(flags);
}
-#endif /* CONFIG_PPC64 */
+/* We don't currently use enter_lazy_tlb() for anything */
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+ struct task_struct *tsk)
+{
+}
+
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index e5f14b13ccf0..08454880a2c0 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -34,11 +34,19 @@ struct mod_arch_specific {
#ifdef __powerpc64__
unsigned int stubs_section; /* Index of stubs section in module */
unsigned int toc_section; /* What section is the TOC? */
-#else
+#ifdef CONFIG_DYNAMIC_FTRACE
+ unsigned long toc;
+ unsigned long tramp;
+#endif
+
+#else /* powerpc64 */
/* Indices of PLT sections within module. */
unsigned int core_plt_section;
unsigned int init_plt_section;
+#ifdef CONFIG_DYNAMIC_FTRACE
+ unsigned long tramp;
#endif
+#endif /* powerpc64 */
/* List of BUG addresses, source line numbers and filenames */
struct list_head bug_list;
@@ -68,6 +76,12 @@ struct mod_arch_specific {
# endif /* MODULE */
#endif
+#ifdef CONFIG_DYNAMIC_FTRACE
+# ifdef MODULE
+ asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous");
+# endif /* MODULE */
+#endif
+
struct exception_table_entry;
void sort_ex_table(struct exception_table_entry *start,
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index 81ef10b6b672..81a23932a160 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -239,6 +239,25 @@ struct mpc52xx_cdm {
u16 mclken_div_psc6; /* CDM + 0x36 reg13 byte2,3 */
};
+/* Interrupt controller Register set */
+struct mpc52xx_intr {
+ u32 per_mask; /* INTR + 0x00 */
+ u32 per_pri1; /* INTR + 0x04 */
+ u32 per_pri2; /* INTR + 0x08 */
+ u32 per_pri3; /* INTR + 0x0c */
+ u32 ctrl; /* INTR + 0x10 */
+ u32 main_mask; /* INTR + 0x14 */
+ u32 main_pri1; /* INTR + 0x18 */
+ u32 main_pri2; /* INTR + 0x1c */
+ u32 reserved1; /* INTR + 0x20 */
+ u32 enc_status; /* INTR + 0x24 */
+ u32 crit_status; /* INTR + 0x28 */
+ u32 main_status; /* INTR + 0x2c */
+ u32 per_status; /* INTR + 0x30 */
+ u32 reserved2; /* INTR + 0x34 */
+ u32 per_error; /* INTR + 0x38 */
+};
+
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h
index 8917ed630565..a218da6bec7c 100644
--- a/arch/powerpc/include/asm/mpc52xx_psc.h
+++ b/arch/powerpc/include/asm/mpc52xx_psc.h
@@ -68,12 +68,20 @@
#define MPC52xx_PSC_IMR_ORERR 0x1000
#define MPC52xx_PSC_IMR_IPC 0x8000
-/* PSC input port change bit */
+/* PSC input port change bits */
#define MPC52xx_PSC_CTS 0x01
#define MPC52xx_PSC_DCD 0x02
#define MPC52xx_PSC_D_CTS 0x10
#define MPC52xx_PSC_D_DCD 0x20
+/* PSC acr bits */
+#define MPC52xx_PSC_IEC_CTS 0x01
+#define MPC52xx_PSC_IEC_DCD 0x02
+
+/* PSC output port bits */
+#define MPC52xx_PSC_OP_RTS 0x01
+#define MPC52xx_PSC_OP_RES 0x02
+
/* PSC mode fields */
#define MPC52xx_PSC_MODE_5_BITS 0x00
#define MPC52xx_PSC_MODE_6_BITS 0x01
@@ -91,6 +99,7 @@
#define MPC52xx_PSC_MODE_ONE_STOP_5_BITS 0x00
#define MPC52xx_PSC_MODE_ONE_STOP 0x07
#define MPC52xx_PSC_MODE_TWO_STOP 0x0f
+#define MPC52xx_PSC_MODE_TXCTS 0x10
#define MPC52xx_PSC_RFNUM_MASK 0x01ff
diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h
index 458c1f7fbc18..dabc01c727b8 100644
--- a/arch/powerpc/include/asm/mutex.h
+++ b/arch/powerpc/include/asm/mutex.h
@@ -1,9 +1,134 @@
/*
- * Pull in the generic implementation for the mutex fastpath.
+ * Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm
+ */
+#ifndef _ASM_POWERPC_MUTEX_H
+#define _ASM_POWERPC_MUTEX_H
+
+static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new)
+{
+ int t;
+
+ __asm__ __volatile__ (
+"1: lwarx %0,0,%1 # mutex trylock\n\
+ cmpw 0,%0,%2\n\
+ bne- 2f\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %3,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:"
+ : "=&r" (t)
+ : "r" (&v->counter), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return t;
+}
+
+static inline int __mutex_dec_return_lock(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%1 # mutex lock\n\
+ addic %0,%0,-1\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+static inline int __mutex_inc_return_unlock(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%1 # mutex unlock\n\
+ addic %0,%0,1\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1 \n\
+ bne- 1b"
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+ if (unlikely(__mutex_dec_return_lock(count) < 0))
+ fail_fn(count);
+}
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (unlikely(__mutex_dec_return_lock(count) < 0))
+ return fail_fn(count);
+ return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ */
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+ if (unlikely(__mutex_inc_return_unlock(count) <= 0))
+ fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
*
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
+ * Change the count from 1 to 0, and return 1 (success), or if the count
+ * was not 1, then return 0 (failure).
*/
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (likely(__mutex_cmpxchg_lock(count, 1, 0) == 1))
+ return 1;
+ return 0;
+}
-#include <asm-generic/mutex-dec.h>
+#endif
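As an illustrative aside (not part of the patch itself), the kernel-doc above implies how a generic mutex layer composes these fastpath helpers with a slowpath; the sketch below assumes a simplified counter where 1 means unlocked, and all demo_* names are hypothetical.

struct demo_mutex {
	atomic_t count;			/* 1: unlocked, 0: locked, <0: waiters */
};

static void demo_lock_slowpath(atomic_t *count)
{
	/* hypothetical: queue the caller and sleep until the lock is free */
}

static void demo_unlock_slowpath(atomic_t *count)
{
	/* hypothetical: wake up the first waiter */
}

static void demo_lock(struct demo_mutex *m)
{
	/* 1 -> 0 takes the lock without any call; anything else falls back */
	__mutex_fastpath_lock(&m->count, demo_lock_slowpath);
}

static void demo_unlock(struct demo_mutex *m)
{
	/* a result <= 0 after the increment means waiters must be woken */
	__mutex_fastpath_unlock(&m->count, demo_unlock_slowpath);
}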
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index c0b8d4a29a91..197d569f5bd3 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -19,12 +19,15 @@
#include <asm/kdump.h>
/*
- * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
+ * On regular PPC32 page size is 4K (but we support 4K/16K/64K pages
+ * on PPC44x). For PPC64 we support either 4K or 64K software
* page size. When using 64K pages however, whether we are really supporting
* 64K pages in HW or not is irrelevant to those definitions.
*/
-#ifdef CONFIG_PPC_64K_PAGES
+#if defined(CONFIG_PPC_64K_PAGES)
#define PAGE_SHIFT 16
+#elif defined(CONFIG_PPC_16K_PAGES)
+#define PAGE_SHIFT 14
#else
#define PAGE_SHIFT 12
#endif
@@ -151,7 +154,7 @@ typedef struct { pte_basic_t pte; } pte_t;
/* 64k pages additionally define a bigger "real PTE" type that gathers
* the "second half" part of the PTE for pseudo 64k pages
*/
-#ifdef CONFIG_PPC_64K_PAGES
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
@@ -191,10 +194,10 @@ typedef pte_basic_t pte_t;
#define pte_val(x) (x)
#define __pte(x) (x)
-#ifdef CONFIG_PPC_64K_PAGES
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
-typedef unsigned long real_pte_t;
+typedef pte_t real_pte_t;
#endif
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index d77072a32cc6..1458d9500381 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -19,6 +19,8 @@
#define PTE_FLAGS_OFFSET 0
#endif
+#define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) /* full page */
+
#ifndef __ASSEMBLY__
/*
* The basic type of a PTE - 64 bits for those CPUs with > 32 bit
@@ -26,10 +28,8 @@
*/
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
-#define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */
#else
typedef unsigned long pte_basic_t;
-#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
#endif
struct page;
@@ -39,6 +39,9 @@ extern void copy_page(void *to, void *from);
#include <asm-generic/page.h>
+#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
+#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PAGE_32_H */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 9047af7baa69..84007afabdb5 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -13,7 +13,6 @@
struct device_node;
-extern unsigned int ppc_pci_flags;
enum {
/* Force re-assigning all resources (ignore firmware
* setup completely)
@@ -36,6 +35,31 @@ enum {
/* ... except for domain 0 */
PPC_PCI_COMPAT_DOMAIN_0 = 0x00000020,
};
+#ifdef CONFIG_PCI
+extern unsigned int ppc_pci_flags;
+
+static inline void ppc_pci_set_flags(int flags)
+{
+ ppc_pci_flags = flags;
+}
+
+static inline void ppc_pci_add_flags(int flags)
+{
+ ppc_pci_flags |= flags;
+}
+
+static inline int ppc_pci_has_flag(int flag)
+{
+ return (ppc_pci_flags & flag);
+}
+#else
+static inline void ppc_pci_set_flags(int flags) { }
+static inline void ppc_pci_add_flags(int flags) { }
+static inline int ppc_pci_has_flag(int flag)
+{
+ return 0;
+}
+#endif
/*
@@ -241,9 +265,6 @@ extern void pcibios_remove_pci_devices(struct pci_bus *bus);
/** Discover new pci devices under this bus, and add them */
extern void pcibios_add_pci_devices(struct pci_bus *bus);
-extern void pcibios_fixup_new_pci_devices(struct pci_bus *bus);
-
-extern int pcibios_remove_root_bus(struct pci_controller *phb);
static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
{
@@ -290,6 +311,7 @@ extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
/* Allocate & free a PCI host bridge structure */
extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
extern void pcibios_free_controller(struct pci_controller *phb);
+extern void pcibios_setup_phb_resources(struct pci_controller *hose);
#ifdef CONFIG_PCI
extern unsigned long pci_address_to_pio(phys_addr_t address);
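As an illustrative aside (not part of the patch itself), platform setup code is now expected to go through the new accessors rather than touching ppc_pci_flags directly; a minimal sketch, assuming a hypothetical board setup hook.

static void __init demo_setup_arch(void)
{
	/* request renumbering of all PCI buses on this platform; with
	 * CONFIG_PCI disabled this compiles away through the stub */
	ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
}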
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 57a2a494886b..3548159a1beb 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -38,8 +38,8 @@ struct pci_dev;
* Set this to 1 if you want the kernel to re-assign all PCI
* bus numbers (don't do that on ppc64 yet !)
*/
-#define pcibios_assign_all_busses() (ppc_pci_flags & \
- PPC_PCI_REASSIGN_ALL_BUS)
+#define pcibios_assign_all_busses() \
+ (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS))
#define pcibios_scan_all_fns(a, b) 0
static inline void pcibios_set_master(struct pci_dev *dev)
@@ -204,15 +204,14 @@ static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
return root;
}
-extern void pcibios_setup_new_device(struct pci_dev *dev);
-
extern void pcibios_claim_one_bus(struct pci_bus *b);
-extern void pcibios_allocate_bus_resources(struct pci_bus *bus);
+extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
extern void pcibios_resource_survey(void);
extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
+extern int remove_phb_dynamic(struct pci_controller *phb);
extern struct pci_dev *of_create_pci_dev(struct device_node *node,
struct pci_bus *bus, int devfn);
@@ -221,6 +220,7 @@ extern void of_scan_pci_bridge(struct device_node *node,
struct pci_dev *dev);
extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
+extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
extern int pci_read_irq_line(struct pci_dev *dev);
@@ -235,9 +235,8 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
resource_size_t *start, resource_size_t *end);
-extern void pcibios_do_bus_setup(struct pci_bus *bus);
-extern void pcibios_fixup_of_probed_bus(struct pci_bus *bus);
-
+extern void pcibios_setup_bus_devices(struct pci_bus *bus);
+extern void pcibios_setup_bus_self(struct pci_bus *bus);
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_PCI_H */
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 58c07147b3ea..0815eb40acae 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -3,6 +3,8 @@
#include <linux/threads.h>
+#define PTE_NONCACHE_NUM 0 /* dummy for now to share code w/ppc64 */
+
extern void __bad_pte(pmd_t *pmd);
extern pgd_t *pgd_alloc(struct mm_struct *mm);
@@ -33,10 +35,13 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
-extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
-extern void pte_free(struct mm_struct *mm, pgtable_t pte);
-#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
+static inline void pgtable_free(pgtable_free_t pgf)
+{
+ void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
+
+ free_page((unsigned long)p);
+}
#define check_pgt_cache() do { } while (0)
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 812a1d8f35cb..afda2bdd860f 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -7,7 +7,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
@@ -108,31 +107,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
return page;
}
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
- pgtable_page_dtor(ptepage);
- __free_page(ptepage);
-}
-
-#define PGF_CACHENUM_MASK 0x7
-
-typedef struct pgtable_free {
- unsigned long val;
-} pgtable_free_t;
-
-static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
- unsigned long mask)
-{
- BUG_ON(cachenum > PGF_CACHENUM_MASK);
-
- return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
-}
-
static inline void pgtable_free(pgtable_free_t pgf)
{
void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
@@ -144,14 +118,6 @@ static inline void pgtable_free(pgtable_free_t pgf)
kmem_cache_free(pgtable_cache[cachenum], p);
}
-extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
-
-#define __pte_free_tlb(tlb,ptepage) \
-do { \
- pgtable_page_dtor(ptepage); \
- pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
- PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
-} while (0)
#define __pmd_free_tlb(tlb, pmd) \
pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index b4505ed0f0f2..5d8480265a77 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -2,11 +2,52 @@
#define _ASM_POWERPC_PGALLOC_H
#ifdef __KERNEL__
+#include <linux/mm.h>
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+ pgtable_page_dtor(ptepage);
+ __free_page(ptepage);
+}
+
+typedef struct pgtable_free {
+ unsigned long val;
+} pgtable_free_t;
+
+#define PGF_CACHENUM_MASK 0x7
+
+static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
+ unsigned long mask)
+{
+ BUG_ON(cachenum > PGF_CACHENUM_MASK);
+
+ return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
+}
+
#ifdef CONFIG_PPC64
#include <asm/pgalloc-64.h>
#else
#include <asm/pgalloc-32.h>
#endif
+extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+
+#ifdef CONFIG_SMP
+#define __pte_free_tlb(tlb,ptepage) \
+do { \
+ pgtable_page_dtor(ptepage); \
+ pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
+ PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
+} while (0)
+#else
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
+#endif
+
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 6ab7c67cb5ab..f69a4d977729 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -228,9 +228,10 @@ extern int icache_44x_need_flush;
* - FILE *must* be in the bottom three bits because swap cache
* entries use the top 29 bits for TLB2.
*
- * - CACHE COHERENT bit (M) has no effect on PPC440 core, because it
- * doesn't support SMP. So we can use this as software bit, like
- * DIRTY.
+ * - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
+ * because it doesn't support SMP. However, some later 460 variants
+ * have -some- form of SMP support and so I keep the bit there for
+ * future use
*
* With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
* for memory protection related functions (see PTE structure in
@@ -436,20 +437,23 @@ extern int icache_44x_need_flush;
_PAGE_USER | _PAGE_ACCESSED | \
_PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
_PAGE_EXEC | _PAGE_HWEXEC)
+
/*
- * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
- * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
- * to have it in the Linux PTE, and in fact the bit could be reused for
- * another purpose. -- paulus.
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
*/
-
-#ifdef CONFIG_44x
-#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED)
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
#else
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
#endif
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)
+
#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
#define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
+#define _PAGE_KERNEL_NC (_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE)
#ifdef CONFIG_PPC_STD_MMU
/* On standard PPC MMU, no user access implies kernel read/write access,
@@ -459,7 +463,7 @@ extern int icache_44x_need_flush;
#define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED)
#endif
-#define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define _PAGE_IO (_PAGE_KERNEL_NC | _PAGE_GUARDED)
#define _PAGE_RAM (_PAGE_KERNEL | _PAGE_HWEXEC)
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
@@ -552,9 +556,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
-static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
-static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
-
static inline pte_t pte_wrprotect(pte_t pte) {
pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
@@ -693,10 +694,11 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
#endif
}
+
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
-#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) && defined(CONFIG_DEBUG_VM)
WARN_ON(pte_present(*ptep));
#endif
__set_pte_at(mm, addr, ptep, pte);
@@ -760,16 +762,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
__changed; \
})
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
-
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 4c0a8c62859d..b0f18be81d9f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -100,7 +100,7 @@
#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY)
-/* __pgprot defined in arch/powerpc/incliude/asm/page.h */
+/* __pgprot defined in arch/powerpc/include/asm/page.h */
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
@@ -245,9 +245,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
-static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
-static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
-
static inline pte_t pte_wrprotect(pte_t pte) {
pte_val(pte) &= ~(_PAGE_RW); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
@@ -405,16 +402,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
__changed; \
})
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
-
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index dbb8ca172e44..07f55e601696 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -16,6 +16,32 @@ struct mm_struct;
#endif
#ifndef __ASSEMBLY__
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+ _PAGE_WRITETHRU)
+
+#define pgprot_noncached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+ _PAGE_NO_CACHE | _PAGE_GUARDED))
+
+#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+ _PAGE_NO_CACHE))
+
+#define pgprot_cached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+ _PAGE_COHERENT))
+
+#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+ _PAGE_COHERENT | _PAGE_WRITETHRU))
+
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
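As an illustrative aside (not part of the patch itself), the consolidated cache-control helpers replace the per-MMU pgprot_noncached() definitions removed above; a minimal sketch of a driver mapping device registers uncached, where demo_mmap_registers and its pfn argument are hypothetical.

static int demo_mmap_registers(struct vm_area_struct *vma, unsigned long pfn)
{
	/* clear any existing cache-control bits, then map no-cache + guarded */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}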
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index c4a029ccb4d3..1a0d628eb114 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -425,14 +425,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#define fromreal(rd) tovirt(rd,rd)
#define tophys(rd,rs) \
-0: addis rd,rs,-KERNELBASE@h; \
+0: addis rd,rs,-PAGE_OFFSET@h; \
.section ".vtop_fixup","aw"; \
.align 1; \
.long 0b; \
.previous
#define tovirt(rd,rs) \
-0: addis rd,rs,KERNELBASE@h; \
+0: addis rd,rs,PAGE_OFFSET@h; \
.section ".ptov_fixup","aw"; \
.align 1; \
.long 0b; \
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 101ed87f7d84..d3466490104a 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -69,8 +69,6 @@ extern int _prep_type;
#ifdef __KERNEL__
-extern int have_of;
-
struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);
@@ -207,6 +205,11 @@ struct thread_struct {
#define INIT_SP_LIMIT \
(_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
+#ifdef CONFIG_SPE
+#define SPEFSCR_INIT .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
+#else
+#define SPEFSCR_INIT
+#endif
#ifdef CONFIG_PPC32
#define INIT_THREAD { \
@@ -215,6 +218,7 @@ struct thread_struct {
.fs = KERNEL_DS, \
.pgdir = swapper_pg_dir, \
.fpexc_mode = MSR_FE0 | MSR_FE1, \
+ SPEFSCR_INIT \
}
#else
#define INIT_THREAD { \
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index eb3bd2e1c7f6..6ff04185d2aa 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -253,6 +253,9 @@ extern void kdump_move_device_tree(void);
/* CPU OF node matching */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
+/* cache lookup */
+struct device_node *of_find_next_cache_node(struct device_node *np);
+
/* Get the MAC address */
extern const void *of_get_mac_address(struct device_node *np);
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index f9e34c493cbb..eead5c67197a 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -305,30 +305,36 @@ static inline const char* ps3_result(int result)
/* system bus routines */
enum ps3_match_id {
- PS3_MATCH_ID_EHCI = 1,
- PS3_MATCH_ID_OHCI = 2,
- PS3_MATCH_ID_GELIC = 3,
- PS3_MATCH_ID_AV_SETTINGS = 4,
- PS3_MATCH_ID_SYSTEM_MANAGER = 5,
- PS3_MATCH_ID_STOR_DISK = 6,
- PS3_MATCH_ID_STOR_ROM = 7,
- PS3_MATCH_ID_STOR_FLASH = 8,
- PS3_MATCH_ID_SOUND = 9,
- PS3_MATCH_ID_GRAPHICS = 10,
- PS3_MATCH_ID_LPM = 11,
+ PS3_MATCH_ID_EHCI = 1,
+ PS3_MATCH_ID_OHCI = 2,
+ PS3_MATCH_ID_GELIC = 3,
+ PS3_MATCH_ID_AV_SETTINGS = 4,
+ PS3_MATCH_ID_SYSTEM_MANAGER = 5,
+ PS3_MATCH_ID_STOR_DISK = 6,
+ PS3_MATCH_ID_STOR_ROM = 7,
+ PS3_MATCH_ID_STOR_FLASH = 8,
+ PS3_MATCH_ID_SOUND = 9,
+ PS3_MATCH_ID_GPU = 10,
+ PS3_MATCH_ID_LPM = 11,
};
-#define PS3_MODULE_ALIAS_EHCI "ps3:1"
-#define PS3_MODULE_ALIAS_OHCI "ps3:2"
-#define PS3_MODULE_ALIAS_GELIC "ps3:3"
-#define PS3_MODULE_ALIAS_AV_SETTINGS "ps3:4"
-#define PS3_MODULE_ALIAS_SYSTEM_MANAGER "ps3:5"
-#define PS3_MODULE_ALIAS_STOR_DISK "ps3:6"
-#define PS3_MODULE_ALIAS_STOR_ROM "ps3:7"
-#define PS3_MODULE_ALIAS_STOR_FLASH "ps3:8"
-#define PS3_MODULE_ALIAS_SOUND "ps3:9"
-#define PS3_MODULE_ALIAS_GRAPHICS "ps3:10"
-#define PS3_MODULE_ALIAS_LPM "ps3:11"
+enum ps3_match_sub_id {
+ PS3_MATCH_SUB_ID_GPU_FB = 1,
+ PS3_MATCH_SUB_ID_GPU_RAMDISK = 2,
+};
+
+#define PS3_MODULE_ALIAS_EHCI "ps3:1:0"
+#define PS3_MODULE_ALIAS_OHCI "ps3:2:0"
+#define PS3_MODULE_ALIAS_GELIC "ps3:3:0"
+#define PS3_MODULE_ALIAS_AV_SETTINGS "ps3:4:0"
+#define PS3_MODULE_ALIAS_SYSTEM_MANAGER "ps3:5:0"
+#define PS3_MODULE_ALIAS_STOR_DISK "ps3:6:0"
+#define PS3_MODULE_ALIAS_STOR_ROM "ps3:7:0"
+#define PS3_MODULE_ALIAS_STOR_FLASH "ps3:8:0"
+#define PS3_MODULE_ALIAS_SOUND "ps3:9:0"
+#define PS3_MODULE_ALIAS_GPU_FB "ps3:10:1"
+#define PS3_MODULE_ALIAS_GPU_RAMDISK "ps3:10:2"
+#define PS3_MODULE_ALIAS_LPM "ps3:11:0"
enum ps3_system_bus_device_type {
PS3_DEVICE_TYPE_IOC0 = 1,
@@ -337,11 +343,6 @@ enum ps3_system_bus_device_type {
PS3_DEVICE_TYPE_LPM,
};
-enum ps3_match_sub_id {
- /* for PS3_MATCH_ID_GRAPHICS */
- PS3_MATCH_SUB_ID_FB = 1,
-};
-
/**
* struct ps3_system_bus_device - a device on the system bus
*/
@@ -516,4 +517,7 @@ void ps3_sync_irq(int node);
u32 ps3_get_hw_thread_id(int cpu);
u64 ps3_get_spe_id(void *arg);
+/* mutex synchronizing GPU accesses and video mode changes */
+extern struct mutex ps3_gpu_mutex;
+
#endif
diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h
index 5aa22cffdbd6..cd24ac16660a 100644
--- a/arch/powerpc/include/asm/ps3av.h
+++ b/arch/powerpc/include/asm/ps3av.h
@@ -740,8 +740,4 @@ extern int ps3av_audio_mute(int);
extern int ps3av_audio_mute_analog(int);
extern int ps3av_dev_open(void);
extern int ps3av_dev_close(void);
-extern void ps3av_register_flip_ctl(void (*flip_ctl)(int on, void *data),
- void *flip_data);
-extern void ps3av_flip_ctl(int on);
-
#endif /* _ASM_POWERPC_PS3AV_H_ */
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h
index edee15d269ea..a0a15311d0d8 100644
--- a/arch/powerpc/include/asm/qe.h
+++ b/arch/powerpc/include/asm/qe.h
@@ -17,6 +17,8 @@
#ifdef __KERNEL__
#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/err.h>
#include <asm/cpm.h>
#include <asm/immap_qe.h>
@@ -84,7 +86,11 @@ static inline bool qe_clock_is_brg(enum qe_clock clk)
extern spinlock_t cmxgcr_lock;
/* Export QE common operations */
+#ifdef CONFIG_QUICC_ENGINE
extern void __init qe_reset(void);
+#else
+static inline void qe_reset(void) {}
+#endif
/* QE PIO */
#define QE_PIO_PINS 32
@@ -101,16 +107,43 @@ struct qe_pio_regs {
#endif
};
-extern int par_io_init(struct device_node *np);
-extern int par_io_of_config(struct device_node *np);
#define QE_PIO_DIR_IN 2
#define QE_PIO_DIR_OUT 1
extern void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin,
int dir, int open_drain, int assignment,
int has_irq);
+#ifdef CONFIG_QUICC_ENGINE
+extern int par_io_init(struct device_node *np);
+extern int par_io_of_config(struct device_node *np);
extern int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
int assignment, int has_irq);
extern int par_io_data_set(u8 port, u8 pin, u8 val);
+#else
+static inline int par_io_init(struct device_node *np) { return -ENOSYS; }
+static inline int par_io_of_config(struct device_node *np) { return -ENOSYS; }
+static inline int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
+ int assignment, int has_irq) { return -ENOSYS; }
+static inline int par_io_data_set(u8 port, u8 pin, u8 val) { return -ENOSYS; }
+#endif /* CONFIG_QUICC_ENGINE */
+
+/*
+ * Pin multiplexing functions.
+ */
+struct qe_pin;
+#ifdef CONFIG_QE_GPIO
+extern struct qe_pin *qe_pin_request(struct device_node *np, int index);
+extern void qe_pin_free(struct qe_pin *qe_pin);
+extern void qe_pin_set_gpio(struct qe_pin *qe_pin);
+extern void qe_pin_set_dedicated(struct qe_pin *pin);
+#else
+static inline struct qe_pin *qe_pin_request(struct device_node *np, int index)
+{
+ return ERR_PTR(-ENOSYS);
+}
+static inline void qe_pin_free(struct qe_pin *qe_pin) {}
+static inline void qe_pin_set_gpio(struct qe_pin *qe_pin) {}
+static inline void qe_pin_set_dedicated(struct qe_pin *pin) {}
+#endif /* CONFIG_QE_GPIO */
/* QE internal API */
int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input);
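As an illustrative aside (not part of the patch itself), a hedged sketch of how a driver might borrow a QE pin as a GPIO and hand it back to its dedicated function using the new pin-multiplexing API; the meaning of the index argument and the demo_* name are assumptions.

static int demo_borrow_pin(struct device_node *np)
{
	struct qe_pin *pin;

	pin = qe_pin_request(np, 0);
	if (IS_ERR(pin))
		return PTR_ERR(pin);

	qe_pin_set_gpio(pin);		/* switch the pin to GPIO mode */
	/* ... drive it as an ordinary GPIO here ... */
	qe_pin_set_dedicated(pin);	/* restore the dedicated QE function */
	qe_pin_free(pin);
	return 0;
}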
diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h
index 56a7745ca343..cf519663a791 100644
--- a/arch/powerpc/include/asm/qe_ic.h
+++ b/arch/powerpc/include/asm/qe_ic.h
@@ -17,6 +17,9 @@
#include <linux/irq.h>
+struct device_node;
+struct qe_ic;
+
#define NUM_OF_QE_IC_GROUPS 6
/* Flags when we init the QE IC */
@@ -54,17 +57,27 @@ enum qe_ic_grp_id {
QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
};
+#ifdef CONFIG_QUICC_ENGINE
void qe_ic_init(struct device_node *node, unsigned int flags,
void (*low_handler)(unsigned int irq, struct irq_desc *desc),
void (*high_handler)(unsigned int irq, struct irq_desc *desc));
+unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
+unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
+#else
+static inline void qe_ic_init(struct device_node *node, unsigned int flags,
+ void (*low_handler)(unsigned int irq, struct irq_desc *desc),
+ void (*high_handler)(unsigned int irq, struct irq_desc *desc))
+{}
+static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
+{ return 0; }
+static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
+{ return 0; }
+#endif /* CONFIG_QUICC_ENGINE */
+
void qe_ic_set_highest_priority(unsigned int virq, int high);
int qe_ic_set_priority(unsigned int virq, unsigned int priority);
int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
-struct qe_ic;
-unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
-unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
-
static inline void qe_ic_cascade_low_ipic(unsigned int irq,
struct irq_desc *desc)
{
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c6d1ab650778..f484a343efba 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -783,6 +783,10 @@ extern void scom970_write(unsigned int address, unsigned long value);
#define __get_SP() ({unsigned long sp; \
asm volatile("mr %0,1": "=r" (sp)); sp;})
+struct pt_regs;
+
+extern void ppc_save_regs(struct pt_regs *regs);
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_REG_H */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 8eaa7b28d9d0..e0175beb4462 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -168,6 +168,7 @@ extern void rtas_os_term(char *str);
extern int rtas_get_sensor(int sensor, int index, int *state);
extern int rtas_get_power_level(int powerdomain, int *level);
extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
+extern bool rtas_indicator_present(int token, int *maxindex);
extern int rtas_set_indicator(int indicator, int index, int new_value);
extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
extern void rtas_progress(char *s, unsigned short hex);
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
index ced34f1dc8f8..3d9f831c3c55 100644
--- a/arch/powerpc/include/asm/sfp-machine.h
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -82,7 +82,7 @@
#define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
-#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y)
+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y)
#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
/* These macros define what NaN looks like. They're supposed to expand to
@@ -97,6 +97,20 @@
#define _FP_KEEPNANFRACP 1
+#ifdef FP_EX_BOOKE_E500_SPE
+#define FP_EX_INEXACT (1 << 21)
+#define FP_EX_INVALID (1 << 20)
+#define FP_EX_DIVZERO (1 << 19)
+#define FP_EX_UNDERFLOW (1 << 18)
+#define FP_EX_OVERFLOW (1 << 17)
+#define FP_INHIBIT_RESULTS 0
+
+#define __FPU_FPSCR (current->thread.spefscr)
+#define __FPU_ENABLED_EXC \
+({ \
+ (__FPU_FPSCR >> 2) & 0x1f; \
+})
+#else
/* Exception flags. We use the bit positions of the appropriate bits
in the FPSCR, which also correspond to the FE_* bits. This makes
everything easier ;-). */
@@ -111,22 +125,6 @@
#define FP_EX_DIVZERO (1 << (31 - 5))
#define FP_EX_INEXACT (1 << (31 - 6))
-/* This macro appears to be called when both X and Y are NaNs, and
- * has to choose one and copy it to R. i386 goes for the larger of the
- * two, sparc64 just picks Y. I don't understand this at all so I'll
- * go with sparc64 because it's shorter :-> -- PMM
- */
-#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
- do { \
- R##_s = Y##_s; \
- _FP_FRAC_COPY_##wc(R,Y); \
- R##_c = FP_CLS_NAN; \
- } while (0)
-
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-
#define __FPU_FPSCR (current->thread.fpscr.val)
/* We only actually write to the destination register
@@ -137,6 +135,32 @@
(__FPU_FPSCR >> 3) & 0x1f; \
})
+#endif
+
+/*
+ * If one NaN is signaling and the other is not,
+ * we choose that one, otherwise we choose X.
+ */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
+ do { \
+ if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
+ && !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
+ { \
+ R##_s = X##_s; \
+ _FP_FRAC_COPY_##wc(R,X); \
+ } \
+ else \
+ { \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R,Y); \
+ } \
+ R##_c = FP_CLS_NAN; \
+ } while (0)
+
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
#define __FPU_TRAP_P(bits) \
((__FPU_ENABLED_EXC & (bits)) != 0)
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 1866cec4f967..c25f73d1d842 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -81,6 +81,13 @@ extern int cpu_to_core_id(int cpu);
#define PPC_MSG_CALL_FUNC_SINGLE 2
#define PPC_MSG_DEBUGGER_BREAK 3
+/*
+ * irq controllers that have dedicated ipis per message and don't
+ * need additional code in the action handler may use this
+ */
+extern int smp_request_message_ipi(int virq, int message);
+extern const char *smp_ipi_name[];
+
void smp_init_iSeries(void);
void smp_init_pSeries(void);
void smp_init_cell(void);
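As an illustrative aside (not part of the patch itself), an interrupt controller with one virq per IPI message could register it through the new helper; a minimal sketch, where debugger_virq is assumed to have been mapped by the controller already.

static void __init demo_request_ipis(int debugger_virq)
{
	/* no extra demux code is needed in the handler for a dedicated IPI */
	smp_request_message_ipi(debugger_virq, PPC_MSG_DEBUGGER_BREAK);
}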
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index f56a843f4705..36864364e601 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -277,7 +277,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
bne- 1b"
: "=&r"(tmp)
: "r"(&rw->lock)
- : "cr0", "memory");
+ : "cr0", "xer", "memory");
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 8b2eb044270a..0ab8d869e3d6 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -128,7 +128,7 @@ struct spu {
int number;
unsigned int irqs[3];
u32 node;
- u64 flags;
+ unsigned long flags;
u64 class_0_pending;
u64 class_0_dar;
u64 class_1_dar;
diff --git a/arch/powerpc/include/asm/swab.h b/arch/powerpc/include/asm/swab.h
new file mode 100644
index 000000000000..ef824ae4b79c
--- /dev/null
+++ b/arch/powerpc/include/asm/swab.h
@@ -0,0 +1,90 @@
+#ifndef _ASM_POWERPC_SWAB_H
+#define _ASM_POWERPC_SWAB_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/types.h>
+#include <linux/compiler.h>
+
+#ifdef __GNUC__
+
+#ifndef __powerpc64__
+#define __SWAB_64_THRU_32__
+#endif /* __powerpc64__ */
+
+#ifdef __KERNEL__
+
+static __inline__ __u16 ld_le16(const volatile __u16 *addr)
+{
+ __u16 val;
+
+ __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+ return val;
+}
+#define __arch_swab16p ld_le16
+
+static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
+{
+ __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+static inline void __arch_swab16s(__u16 *addr)
+{
+ st_le16(addr, *addr);
+}
+#define __arch_swab16s __arch_swab16s
+
+static __inline__ __u32 ld_le32(const volatile __u32 *addr)
+{
+ __u32 val;
+
+ __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
+ return val;
+}
+#define __arch_swab32p ld_le32
+
+static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
+{
+ __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
+}
+
+static inline void __arch_swab32s(__u32 *addr)
+{
+ st_le32(addr, *addr);
+}
+#define __arch_swab32s __arch_swab32s
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 value)
+{
+ __u16 result;
+
+ __asm__("rlwimi %0,%1,8,16,23"
+ : "=r" (result)
+ : "r" (value), "0" (value >> 8));
+ return result;
+}
+#define __arch_swab16 __arch_swab16
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 value)
+{
+ __u32 result;
+
+ __asm__("rlwimi %0,%1,24,16,23\n\t"
+ "rlwimi %0,%1,8,8,15\n\t"
+ "rlwimi %0,%1,24,0,7"
+ : "=r" (result)
+ : "r" (value), "0" (value >> 24));
+ return result;
+}
+#define __arch_swab32 __arch_swab32
+
+#endif /* __KERNEL__ */
+
+#endif /* __GNUC__ */
+
+#endif /* _ASM_POWERPC_SWAB_H */
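As an illustrative aside (not part of the patch itself), the helpers above let byte order be reversed either during the memory access or purely in registers; a minimal sketch with hypothetical demo_* wrappers.

static __u32 demo_read_le32(const volatile __u32 *p)
{
	return ld_le32(p);		/* one lwbrx, no separate swap needed */
}

static __u32 demo_swab32(__u32 x)
{
	return __arch_swab32(x);	/* three rlwimi instructions, no load */
}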
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index 45963e80f557..28f6ddbff4cf 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -5,6 +5,10 @@
#include <linux/stringify.h>
#include <asm/feature-fixups.h>
+#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
+#define __SUBARCH_HAS_LWSYNC
+#endif
+
#ifndef __ASSEMBLY__
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index d6648c143322..2a4be19a92c4 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -23,15 +23,17 @@
* read_barrier_depends() prevents data-dependent loads being reordered
* across this point (nop on PPC).
*
- * We have to use the sync instructions for mb(), since lwsync doesn't
- * order loads with respect to previous stores. Lwsync is fine for
- * rmb(), though. Note that rmb() actually uses a sync on 32-bit
- * architectures.
+ * *mb() variants without smp_ prefix must order all types of memory
+ * operations with one another. sync is the only instruction sufficient
+ * to do this.
*
- * For wmb(), we use sync since wmb is used in drivers to order
- * stores to system memory with respect to writes to the device.
- * However, smp_wmb() can be a lighter-weight lwsync or eieio barrier
- * on SMP since it is only used to order updates to system memory.
+ * For the smp_ barriers, ordering is for cacheable memory operations
+ * only. We have to use the sync instruction for smp_mb(), since lwsync
+ * doesn't order loads with respect to previous stores. Lwsync can be
+ * used for smp_rmb() and smp_wmb().
+ *
+ * However, on CPUs that don't support lwsync, lwsync actually maps to a
+ * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
*/
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
@@ -45,14 +47,14 @@
#ifdef CONFIG_SMP
#ifdef __SUBARCH_HAS_LWSYNC
-# define SMPWMB lwsync
+# define SMPWMB LWSYNC
#else
# define SMPWMB eieio
#endif
#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() __asm__ __volatile__ (__stringify(SMPWMB) : : :"memory")
+#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
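As an illustrative aside (not part of the patch itself), the smp_wmb()/smp_rmb() pairing described in the comment above is the usual producer/consumer pattern over cacheable memory; demo_data and demo_flag are hypothetical.

static int demo_data;
static int demo_flag;

static void demo_publish(int value)
{
	demo_data = value;
	smp_wmb();		/* order the store to demo_data before demo_flag */
	demo_flag = 1;
}

static int demo_consume(void)
{
	if (!demo_flag)
		return -1;
	smp_rmb();		/* order the load of demo_flag before demo_data */
	return demo_data;
}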
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index febd581ec9b0..27ccb764fdab 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -48,26 +48,6 @@ extern unsigned long ppc_proc_freq;
extern unsigned long ppc_tb_freq;
#define DEFAULT_TB_FREQ 125000000UL
-/*
- * By putting all of this stuff into a single struct we
- * reduce the number of cache lines touched by do_gettimeofday.
- * Both by collecting all of the data in one cache line and
- * by touching only one TOC entry on ppc64.
- */
-struct gettimeofday_vars {
- u64 tb_to_xs;
- u64 stamp_xsec;
- u64 tb_orig_stamp;
-};
-
-struct gettimeofday_struct {
- unsigned long tb_ticks_per_sec;
- struct gettimeofday_vars vars[2];
- struct gettimeofday_vars * volatile varp;
- unsigned var_idx;
- unsigned tb_to_us;
-};
-
struct div_result {
u64 result_high;
u64 result_low;
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index a2c6bfd85fb7..abbe3419d1dd 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -6,6 +6,9 @@
*
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
+ * - local_flush_tlb_mm(mm) flushes the specified mm context on
+ * the local processor
+ * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
* - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
@@ -17,7 +20,7 @@
*/
#ifdef __KERNEL__
-#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
+#ifdef CONFIG_PPC_MMU_NOHASH
/*
* TLB flushing for software loaded TLB chips
*
@@ -28,63 +31,49 @@
#include <linux/mm.h>
-extern void _tlbie(unsigned long address, unsigned int pid);
-extern void _tlbil_all(void);
-extern void _tlbil_pid(unsigned int pid);
-extern void _tlbil_va(unsigned long address, unsigned int pid);
+#define MMU_NO_CONTEXT ((unsigned int)-1)
-#if defined(CONFIG_40x) || defined(CONFIG_8xx)
-#define _tlbia() asm volatile ("tlbia; sync" : : : "memory")
-#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
-extern void _tlbia(void);
-#endif
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
- _tlbil_pid(mm->context.id);
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long vmaddr)
-{
- _tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0);
-}
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
- unsigned long vmaddr)
-{
- flush_tlb_page(vma, vmaddr);
-}
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- _tlbil_pid(vma->vm_mm->context.id);
-}
+#ifdef CONFIG_SMP
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+#else
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)
+#endif
+#define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr)
-static inline void flush_tlb_kernel_range(unsigned long start,
- unsigned long end)
-{
- _tlbil_pid(0);
-}
+#elif defined(CONFIG_PPC_STD_MMU_32)
-#elif defined(CONFIG_PPC32)
/*
- * TLB flushing for "classic" hash-MMMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
*/
-extern void _tlbie(unsigned long address);
-extern void _tlbia(void);
-
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+ flush_tlb_page(vma, vmaddr);
+}
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ flush_tlb_mm(mm);
+}
+
+#elif defined(CONFIG_PPC_STD_MMU_64)
-#else
/*
- * TLB flushing for 64-bit has-MMU CPUs
+ * TLB flushing for 64-bit hash-MMU CPUs
*/
#include <linux/percpu.h>
@@ -134,10 +123,19 @@ extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
extern void flush_hash_range(unsigned long number, int local);
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+}
+
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{
@@ -162,7 +160,8 @@ static inline void flush_tlb_kernel_range(unsigned long start,
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
unsigned long end);
-
+#else
+#error Unsupported MMU type
#endif
#endif /*__KERNEL__ */
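As an illustrative aside (not part of the patch itself), the new local_ variants flush only the calling CPU's TLB; a minimal sketch contrasting them with the broadcast forms, where demo_flush_one is hypothetical.

static void demo_flush_one(struct vm_area_struct *vma, unsigned long addr)
{
	/* flushes only this CPU's entry for addr; flush_tlb_page() is the
	 * form that also reaches other CPUs on SMP configurations */
	local_flush_tlb_page(vma, addr);
}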
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index c32da6f97999..375258559ae6 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -22,11 +22,11 @@ static inline cpumask_t node_to_cpumask(int node)
return numa_cpumask_lookup_table[node];
}
+#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])
+
static inline int node_to_first_cpu(int node)
{
- cpumask_t tmp;
- tmp = node_to_cpumask(node);
- return first_cpu(tmp);
+ return cpumask_first(cpumask_of_node(node));
}
int of_node_to_nid(struct device_node *device);
@@ -46,9 +46,12 @@ static inline int pcibus_to_node(struct pci_bus *bus)
node_to_cpumask(pcibus_to_node(bus)) \
)
+#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
+ cpu_all_mask : \
+ cpumask_of_node(pcibus_to_node(bus)))
+
/* sched_domains SD_NODE_INIT for PPC64 machines */
#define SD_NODE_INIT (struct sched_domain) { \
- .span = CPU_MASK_NONE, \
.parent = NULL, \
.child = NULL, \
.groups = NULL, \
@@ -109,6 +112,8 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
#endif
#endif
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index f01393224b52..13c2c283e178 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -39,6 +39,7 @@
#ifndef __ASSEMBLY__
#include <linux/unistd.h>
+#include <linux/time.h>
#define SYSCALL_MAP_SIZE ((__NR_syscalls + 31) / 32)
@@ -83,6 +84,7 @@ struct vdso_data {
__u32 icache_log_block_size; /* L1 i-cache log block size */
__s32 wtom_clock_sec; /* Wall to monotonic clock */
__s32 wtom_clock_nsec;
+ struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
__u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
};
@@ -102,6 +104,7 @@ struct vdso_data {
__u32 tz_dsttime; /* Type of dst correction 0x5C */
__s32 wtom_clock_sec; /* Wall to monotonic clock */
__s32 wtom_clock_nsec;
+ struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
__u32 dcache_block_size; /* L1 d-cache block size */
__u32 icache_block_size; /* L1 i-cache block size */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 92673b43858d..8d1a419df35d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -17,6 +17,7 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog
CFLAGS_REMOVE_btext.o = -pg -mno-sched-epilog
+CFLAGS_REMOVE_prom.o = -pg -mno-sched-epilog
ifdef CONFIG_DYNAMIC_FTRACE
# dynamic ftrace setup.
@@ -28,7 +29,7 @@ endif
obj-y := cputable.o ptrace.o syscalls.o \
irq.o align.o signal_32.o pmc.o vdso.o \
init_task.o process.o systbl.o idle.o \
- signal.o sysfs.o
+ signal.o sysfs.o cacheinfo.o
obj-y += vdso32/
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
signal_64.o ptrace32.o \
@@ -102,6 +103,10 @@ endif
obj-$(CONFIG_PPC64) += $(obj64-y)
+ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),)
+obj-y += ppc_save_regs.o
+endif
+
extra-$(CONFIG_PPC_FPU) += fpu.o
extra-$(CONFIG_PPC64) += entry_64.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 75c5dd0138fd..9937fe44555f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -23,9 +23,6 @@
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/hrtimer.h>
-#ifdef CONFIG_KVM
-#include <linux/kvm_host.h>
-#endif
#ifdef CONFIG_PPC64
#include <linux/time.h>
#include <linux/hardirq.h>
@@ -51,6 +48,9 @@
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/alpaca.h>
#endif
+#ifdef CONFIG_KVM
+#include <asm/kvm_44x.h>
+#endif
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
#include "head_booke.h"
@@ -60,6 +60,7 @@ int main(void)
{
DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(MM, offsetof(struct task_struct, mm));
+ DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id));
#ifdef CONFIG_PPC64
DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
#else
@@ -306,6 +307,7 @@ int main(void)
DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32));
DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec));
DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
+ DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime));
DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size));
DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size));
DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size));
@@ -355,12 +357,10 @@ int main(void)
DEFINE(PTE_SIZE, sizeof(pte_t));
#ifdef CONFIG_KVM
- DEFINE(TLBE_BYTES, sizeof(struct tlbe));
+ DEFINE(TLBE_BYTES, sizeof(struct kvmppc_44x_tlbe));
DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
- DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
- DEFINE(VCPU_SHADOW_MOD, offsetof(struct kvm_vcpu, arch.shadow_tlb_mod));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
@@ -378,6 +378,21 @@ int main(void)
DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
#endif
+#ifdef CONFIG_44x
+ DEFINE(PGD_T_LOG2, PGD_T_LOG2);
+ DEFINE(PTE_T_LOG2, PTE_T_LOG2);
+#endif
+
+#ifdef CONFIG_KVM_EXIT_TIMING
+ DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
+ arch.timing_exit.tv32.tbu));
+ DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu,
+ arch.timing_exit.tv32.tbl));
+ DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu,
+ arch.timing_last_enter.tv32.tbu));
+ DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu,
+ arch.timing_last_enter.tv32.tbl));
+#endif
return 0;
}
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
new file mode 100644
index 000000000000..b33f0417a4bf
--- /dev/null
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -0,0 +1,837 @@
+/*
+ * Processor cache information made available to userspace via sysfs;
+ * intended to be compatible with x86 intel_cacheinfo implementation.
+ *
+ * Copyright 2008 IBM Corporation
+ * Author: Nathan Lynch
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/percpu.h>
+#include <asm/prom.h>
+
+#include "cacheinfo.h"
+
+/* per-cpu object for tracking:
+ * - a "cache" kobject for the top-level directory
+ * - a list of "index" objects representing the cpu's local cache hierarchy
+ */
+struct cache_dir {
+ struct kobject *kobj; /* bare (not embedded) kobject for cache
+ * directory */
+ struct cache_index_dir *index; /* list of index objects */
+};
+
+/* "index" object: each cpu's cache directory has an index
+ * subdirectory corresponding to a cache object associated with the
+ * cpu. This object's lifetime is managed via the embedded kobject.
+ */
+struct cache_index_dir {
+ struct kobject kobj;
+ struct cache_index_dir *next; /* next index in parent directory */
+ struct cache *cache;
+};
+
+/* Template for determining which OF properties to query for a given
+ * cache type */
+struct cache_type_info {
+ const char *name;
+ const char *size_prop;
+
+ /* Allow for both [di]-cache-line-size and
+ * [di]-cache-block-size properties. According to the PowerPC
+ * Processor binding, -line-size should be provided if it
+ * differs from the cache block size (that which is operated
+ * on by cache instructions), so we look for -line-size first.
+ * See cache_get_line_size(). */
+
+ const char *line_size_props[2];
+ const char *nr_sets_prop;
+};
+
+/* These are used to index the cache_type_info array. */
+#define CACHE_TYPE_UNIFIED 0
+#define CACHE_TYPE_INSTRUCTION 1
+#define CACHE_TYPE_DATA 2
+
+static const struct cache_type_info cache_type_info[] = {
+ {
+ /* PowerPC Processor binding says the [di]-cache-*
+ * must be equal on unified caches, so just use
+ * d-cache properties. */
+ .name = "Unified",
+ .size_prop = "d-cache-size",
+ .line_size_props = { "d-cache-line-size",
+ "d-cache-block-size", },
+ .nr_sets_prop = "d-cache-sets",
+ },
+ {
+ .name = "Instruction",
+ .size_prop = "i-cache-size",
+ .line_size_props = { "i-cache-line-size",
+ "i-cache-block-size", },
+ .nr_sets_prop = "i-cache-sets",
+ },
+ {
+ .name = "Data",
+ .size_prop = "d-cache-size",
+ .line_size_props = { "d-cache-line-size",
+ "d-cache-block-size", },
+ .nr_sets_prop = "d-cache-sets",
+ },
+};
+
+/* Cache object: each instance of this corresponds to a distinct cache
+ * in the system. There are separate objects for Harvard caches: one
+ * each for instruction and data, and each refers to the same OF node.
+ * The refcount of the OF node is elevated for the lifetime of the
+ * cache object. A cache object is released when its shared_cpu_map
+ * is cleared (see cache_cpu_clear).
+ *
+ * A cache object is on two lists: an unsorted global list
+ * (cache_list) of cache objects; and a singly-linked list
+ * representing the local cache hierarchy, which is ordered by level
+ * (e.g. L1d -> L1i -> L2 -> L3).
+ */
+struct cache {
+ struct device_node *ofnode; /* OF node for this cache, may be cpu */
+ struct cpumask shared_cpu_map; /* online CPUs using this cache */
+ int type; /* split cache disambiguation */
+ int level; /* level not explicit in device tree */
+ struct list_head list; /* global list of cache objects */
+ struct cache *next_local; /* next cache of >= level */
+};
+
+static DEFINE_PER_CPU(struct cache_dir *, cache_dir);
+
+/* traversal/modification of this list occurs only at cpu hotplug time;
+ * access is serialized by cpu hotplug locking
+ */
+static LIST_HEAD(cache_list);
+
+static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
+{
+ return container_of(k, struct cache_index_dir, kobj);
+}
+
+static const char *cache_type_string(const struct cache *cache)
+{
+ return cache_type_info[cache->type].name;
+}
+
+static void __cpuinit cache_init(struct cache *cache, int type, int level, struct device_node *ofnode)
+{
+ cache->type = type;
+ cache->level = level;
+ cache->ofnode = of_node_get(ofnode);
+ INIT_LIST_HEAD(&cache->list);
+ list_add(&cache->list, &cache_list);
+}
+
+static struct cache *__cpuinit new_cache(int type, int level, struct device_node *ofnode)
+{
+ struct cache *cache;
+
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+ if (cache)
+ cache_init(cache, type, level, ofnode);
+
+ return cache;
+}
+
+static void release_cache_debugcheck(struct cache *cache)
+{
+ struct cache *iter;
+
+ list_for_each_entry(iter, &cache_list, list)
+ WARN_ONCE(iter->next_local == cache,
+ "cache for %s(%s) refers to cache for %s(%s)\n",
+ iter->ofnode->full_name,
+ cache_type_string(iter),
+ cache->ofnode->full_name,
+ cache_type_string(cache));
+}
+
+static void release_cache(struct cache *cache)
+{
+ if (!cache)
+ return;
+
+ pr_debug("freeing L%d %s cache for %s\n", cache->level,
+ cache_type_string(cache), cache->ofnode->full_name);
+
+ release_cache_debugcheck(cache);
+ list_del(&cache->list);
+ of_node_put(cache->ofnode);
+ kfree(cache);
+}
+
+static void cache_cpu_set(struct cache *cache, int cpu)
+{
+ struct cache *next = cache;
+
+ while (next) {
+ WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
+ "CPU %i already accounted in %s(%s)\n",
+ cpu, next->ofnode->full_name,
+ cache_type_string(next));
+ cpumask_set_cpu(cpu, &next->shared_cpu_map);
+ next = next->next_local;
+ }
+}
+
+static int cache_size(const struct cache *cache, unsigned int *ret)
+{
+ const char *propname;
+ const u32 *cache_size;
+
+ propname = cache_type_info[cache->type].size_prop;
+
+ cache_size = of_get_property(cache->ofnode, propname, NULL);
+ if (!cache_size)
+ return -ENODEV;
+
+ *ret = *cache_size;
+ return 0;
+}
+
+static int cache_size_kb(const struct cache *cache, unsigned int *ret)
+{
+ unsigned int size;
+
+ if (cache_size(cache, &size))
+ return -ENODEV;
+
+ *ret = size / 1024;
+ return 0;
+}
+
+/* not cache_line_size() because that's a macro in include/linux/cache.h */
+static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
+{
+ const u32 *line_size;
+ int i, lim;
+
+ lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);
+
+ for (i = 0; i < lim; i++) {
+ const char *propname;
+
+ propname = cache_type_info[cache->type].line_size_props[i];
+ line_size = of_get_property(cache->ofnode, propname, NULL);
+ if (line_size)
+ break;
+ }
+
+ if (!line_size)
+ return -ENODEV;
+
+ *ret = *line_size;
+ return 0;
+}
+
+static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
+{
+ const char *propname;
+ const u32 *nr_sets;
+
+ propname = cache_type_info[cache->type].nr_sets_prop;
+
+ nr_sets = of_get_property(cache->ofnode, propname, NULL);
+ if (!nr_sets)
+ return -ENODEV;
+
+ *ret = *nr_sets;
+ return 0;
+}
+
+static int cache_associativity(const struct cache *cache, unsigned int *ret)
+{
+ unsigned int line_size;
+ unsigned int nr_sets;
+ unsigned int size;
+
+ if (cache_nr_sets(cache, &nr_sets))
+ goto err;
+
+ /* If the cache is fully associative, there is no need to
+ * check the other properties.
+ */
+ if (nr_sets == 1) {
+ *ret = 0;
+ return 0;
+ }
+
+ if (cache_get_line_size(cache, &line_size))
+ goto err;
+ if (cache_size(cache, &size))
+ goto err;
+
+ if (!(nr_sets > 0 && size > 0 && line_size > 0))
+ goto err;
+
+ *ret = (size / nr_sets) / line_size;
+ return 0;
+err:
+ return -ENODEV;
+}
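+
+/* Worked example (illustrative): a 32768-byte cache with 128 sets and
+ * 32-byte lines reports (32768 / 128) / 32 = 8 ways; a fully
+ * associative cache (nr_sets == 1) is reported as 0.
+ */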
+
+/* helper for dealing with split caches */
+static struct cache *cache_find_first_sibling(struct cache *cache)
+{
+ struct cache *iter;
+
+ if (cache->type == CACHE_TYPE_UNIFIED)
+ return cache;
+
+ list_for_each_entry(iter, &cache_list, list)
+ if (iter->ofnode == cache->ofnode && iter->next_local == cache)
+ return iter;
+
+ return cache;
+}
+
+/* return the first cache on a local list matching node */
+static struct cache *cache_lookup_by_node(const struct device_node *node)
+{
+ struct cache *cache = NULL;
+ struct cache *iter;
+
+ list_for_each_entry(iter, &cache_list, list) {
+ if (iter->ofnode != node)
+ continue;
+ cache = cache_find_first_sibling(iter);
+ break;
+ }
+
+ return cache;
+}
+
+static bool cache_node_is_unified(const struct device_node *np)
+{
+ return of_get_property(np, "cache-unified", NULL);
+}
+
+static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level)
+{
+ struct cache *cache;
+
+ pr_debug("creating L%d ucache for %s\n", level, node->full_name);
+
+ cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
+
+ return cache;
+}
+
+static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level)
+{
+ struct cache *dcache, *icache;
+
+ pr_debug("creating L%d dcache and icache for %s\n", level,
+ node->full_name);
+
+ dcache = new_cache(CACHE_TYPE_DATA, level, node);
+ icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);
+
+ if (!dcache || !icache)
+ goto err;
+
+ dcache->next_local = icache;
+
+ return dcache;
+err:
+ release_cache(dcache);
+ release_cache(icache);
+ return NULL;
+}
+
+static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, int level)
+{
+ struct cache *cache;
+
+ if (cache_node_is_unified(node))
+ cache = cache_do_one_devnode_unified(node, level);
+ else
+ cache = cache_do_one_devnode_split(node, level);
+
+ return cache;
+}
+
+static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node, int level)
+{
+ struct cache *cache;
+
+ cache = cache_lookup_by_node(node);
+
+ WARN_ONCE(cache && cache->level != level,
+ "cache level mismatch on lookup (got %d, expected %d)\n",
+ cache->level, level);
+
+ if (!cache)
+ cache = cache_do_one_devnode(node, level);
+
+ return cache;
+}
+
+static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger)
+{
+ while (smaller->next_local) {
+ if (smaller->next_local == bigger)
+ return; /* already linked */
+ smaller = smaller->next_local;
+ }
+
+ smaller->next_local = bigger;
+}
+
+static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache)
+{
+ WARN_ON_ONCE(cache->level != 1);
+ WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
+}
+
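+/* Note (assumption): of_find_next_cache_node() is expected to follow
+ * the node's "l2-cache"/"next-level-cache" phandle, so this walk
+ * visits the CPU's L2, then L3, and so on until the chain ends.
+ */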
+static void __cpuinit do_subsidiary_caches(struct cache *cache)
+{
+ struct device_node *subcache_node;
+ int level = cache->level;
+
+ do_subsidiary_caches_debugcheck(cache);
+
+ while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
+ struct cache *subcache;
+
+ level++;
+ subcache = cache_lookup_or_instantiate(subcache_node, level);
+ of_node_put(subcache_node);
+ if (!subcache)
+ break;
+
+ link_cache_lists(cache, subcache);
+ cache = subcache;
+ }
+}
+
+static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id)
+{
+ struct device_node *cpu_node;
+ struct cache *cpu_cache = NULL;
+
+ pr_debug("creating cache object(s) for CPU %i\n", cpu_id);
+
+ cpu_node = of_get_cpu_node(cpu_id, NULL);
+ WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
+ if (!cpu_node)
+ goto out;
+
+ cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
+ if (!cpu_cache)
+ goto out;
+
+ do_subsidiary_caches(cpu_cache);
+
+ cache_cpu_set(cpu_cache, cpu_id);
+out:
+ of_node_put(cpu_node);
+
+ return cpu_cache;
+}
+
+static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
+{
+ struct cache_dir *cache_dir;
+ struct sys_device *sysdev;
+ struct kobject *kobj = NULL;
+
+ sysdev = get_cpu_sysdev(cpu_id);
+ WARN_ONCE(!sysdev, "no sysdev for CPU %i\n", cpu_id);
+ if (!sysdev)
+ goto err;
+
+ kobj = kobject_create_and_add("cache", &sysdev->kobj);
+ if (!kobj)
+ goto err;
+
+ cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
+ if (!cache_dir)
+ goto err;
+
+ cache_dir->kobj = kobj;
+
+ WARN_ON_ONCE(per_cpu(cache_dir, cpu_id) != NULL);
+
+ per_cpu(cache_dir, cpu_id) = cache_dir;
+
+ return cache_dir;
+err:
+ kobject_put(kobj);
+ return NULL;
+}
+
+static void cache_index_release(struct kobject *kobj)
+{
+ struct cache_index_dir *index;
+
+ index = kobj_to_cache_index_dir(kobj);
+
+ pr_debug("freeing index directory for L%d %s cache\n",
+ index->cache->level, cache_type_string(index->cache));
+
+ kfree(index);
+}
+
+static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
+{
+ struct kobj_attribute *kobj_attr;
+
+ kobj_attr = container_of(attr, struct kobj_attribute, attr);
+
+ return kobj_attr->show(k, kobj_attr, buf);
+}
+
+static struct cache *index_kobj_to_cache(struct kobject *k)
+{
+ struct cache_index_dir *index;
+
+ index = kobj_to_cache_index_dir(k);
+
+ return index->cache;
+}
+
+static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+ unsigned int size_kb;
+ struct cache *cache;
+
+ cache = index_kobj_to_cache(k);
+
+ if (cache_size_kb(cache, &size_kb))
+ return -ENODEV;
+
+ return sprintf(buf, "%uK\n", size_kb);
+}
+
+static struct kobj_attribute cache_size_attr =
+ __ATTR(size, 0444, size_show, NULL);
+
+
+static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+ unsigned int line_size;
+ struct cache *cache;
+
+ cache = index_kobj_to_cache(k);
+
+ if (cache_get_line_size(cache, &line_size))
+ return -ENODEV;
+
+ return sprintf(buf, "%u\n", line_size);
+}
+
+static struct kobj_attribute cache_line_size_attr =
+ __ATTR(coherency_line_size, 0444, line_size_show, NULL);
+
+static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+ unsigned int nr_sets;
+ struct cache *cache;
+
+ cache = index_kobj_to_cache(k);
+
+ if (cache_nr_sets(cache, &nr_sets))
+ return -ENODEV;
+
+ return sprintf(buf, "%u\n", nr_sets);
+}
+
+static struct kobj_attribute cache_nr_sets_attr =
+ __ATTR(number_of_sets, 0444, nr_sets_show, NULL);
+
+static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+ unsigned int associativity;
+ struct cache *cache;
+
+ cache = index_kobj_to_cache(k);
+
+ if (cache_associativity(cache, &associativity))
+ return -ENODEV;
+
+ return sprintf(buf, "%u\n", associativity);
+}
+
+static struct kobj_attribute cache_assoc_attr =
+ __ATTR(ways_of_associativity, 0444, associativity_show, NULL);
+
+static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+ struct cache *cache;
+
+ cache = index_kobj_to_cache(k);
+
+ return sprintf(buf, "%s\n", cache_type_string(cache));
+}
+
+static struct kobj_attribute cache_type_attr =
+ __ATTR(type, 0444, type_show, NULL);
+
+static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+ struct cache_index_dir *index;
+ struct cache *cache;
+
+ index = kobj_to_cache_index_dir(k);
+ cache = index->cache;
+
+ return sprintf(buf, "%d\n", cache->level);
+}
+
+static struct kobj_attribute cache_level_attr =
+ __ATTR(level, 0444, level_show, NULL);
+
+static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+ struct cache_index_dir *index;
+ struct cache *cache;
+ int len;
+ int n = 0;
+
+ index = kobj_to_cache_index_dir(k);
+ cache = index->cache;
+ len = PAGE_SIZE - 2;
+
+ if (len > 1) {
+ n = cpumask_scnprintf(buf, len, &cache->shared_cpu_map);
+ buf[n++] = '\n';
+ buf[n] = '\0';
+ }
+ return n;
+}
+
+static struct kobj_attribute cache_shared_cpu_map_attr =
+ __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
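+
+/* shared_cpu_map is printed as a hex CPU mask; e.g. (illustrative)
+ * "00000003" means CPUs 0 and 1 are currently sharing this cache.
+ */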
+
+/* Attributes which should always be created -- the kobject/sysfs core
+ * does this automatically via kobj_type->default_attrs. This is the
+ * minimum data required to uniquely identify a cache.
+ */
+static struct attribute *cache_index_default_attrs[] = {
+ &cache_type_attr.attr,
+ &cache_level_attr.attr,
+ &cache_shared_cpu_map_attr.attr,
+ NULL,
+};
+
+/* Attributes which should be created if the cache device node has the
+ * right properties -- see cacheinfo_create_index_opt_attrs
+ */
+static struct kobj_attribute *cache_index_opt_attrs[] = {
+ &cache_size_attr,
+ &cache_line_size_attr,
+ &cache_nr_sets_attr,
+ &cache_assoc_attr,
+};
+
+static struct sysfs_ops cache_index_ops = {
+ .show = cache_index_show,
+};
+
+static struct kobj_type cache_index_type = {
+ .release = cache_index_release,
+ .sysfs_ops = &cache_index_ops,
+ .default_attrs = cache_index_default_attrs,
+};
+
+static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
+{
+ const char *cache_name;
+ const char *cache_type;
+ struct cache *cache;
+ char *buf;
+ int i;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ cache = dir->cache;
+ cache_name = cache->ofnode->full_name;
+ cache_type = cache_type_string(cache);
+
+ /* We don't want to create an attribute that can't provide a
+ * meaningful value. Check the return value of each optional
+ * attribute's ->show method before registering the
+ * attribute.
+ */
+ for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
+ struct kobj_attribute *attr;
+ ssize_t rc;
+
+ attr = cache_index_opt_attrs[i];
+
+ rc = attr->show(&dir->kobj, attr, buf);
+ if (rc <= 0) {
+ pr_debug("not creating %s attribute for "
+ "%s(%s) (rc = %zd)\n",
+ attr->attr.name, cache_name,
+ cache_type, rc);
+ continue;
+ }
+ if (sysfs_create_file(&dir->kobj, &attr->attr))
+ pr_debug("could not create %s attribute for %s(%s)\n",
+ attr->attr.name, cache_name, cache_type);
+ }
+
+ kfree(buf);
+}
+
+static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir)
+{
+ struct cache_index_dir *index_dir;
+ int rc;
+
+ index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
+ if (!index_dir)
+ goto err;
+
+ index_dir->cache = cache;
+
+ rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
+ cache_dir->kobj, "index%d", index);
+ if (rc)
+ goto err;
+
+ index_dir->next = cache_dir->index;
+ cache_dir->index = index_dir;
+
+ cacheinfo_create_index_opt_attrs(index_dir);
+
+ return;
+err:
+ kfree(index_dir);
+}
+
+static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache *cache_list)
+{
+ struct cache_dir *cache_dir;
+ struct cache *cache;
+ int index = 0;
+
+ cache_dir = cacheinfo_create_cache_dir(cpu_id);
+ if (!cache_dir)
+ return;
+
+ cache = cache_list;
+ while (cache) {
+ cacheinfo_create_index_dir(cache, index, cache_dir);
+ index++;
+ cache = cache->next_local;
+ }
+}
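+
+/* The resulting layout (illustrative path) mirrors the x86 one:
+ *	/sys/devices/system/cpu/cpuN/cache/indexM/{type,level,shared_cpu_map,...}
+ * with one indexM directory per cache in this CPU's local chain.
+ */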
+
+void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id)
+{
+ struct cache *cache;
+
+ cache = cache_chain_instantiate(cpu_id);
+ if (!cache)
+ return;
+
+ cacheinfo_sysfs_populate(cpu_id, cache);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU /* functions needed for cpu offline */
+
+static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
+{
+ struct device_node *cpu_node;
+ struct cache *cache;
+
+ cpu_node = of_get_cpu_node(cpu_id, NULL);
+ WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
+ if (!cpu_node)
+ return NULL;
+
+ cache = cache_lookup_by_node(cpu_node);
+ of_node_put(cpu_node);
+
+ return cache;
+}
+
+static void remove_index_dirs(struct cache_dir *cache_dir)
+{
+ struct cache_index_dir *index;
+
+ index = cache_dir->index;
+
+ while (index) {
+ struct cache_index_dir *next;
+
+ next = index->next;
+ kobject_put(&index->kobj);
+ index = next;
+ }
+}
+
+static void remove_cache_dir(struct cache_dir *cache_dir)
+{
+ remove_index_dirs(cache_dir);
+
+ kobject_put(cache_dir->kobj);
+
+ kfree(cache_dir);
+}
+
+static void cache_cpu_clear(struct cache *cache, int cpu)
+{
+ while (cache) {
+ struct cache *next = cache->next_local;
+
+ WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
+ "CPU %i not accounted in %s(%s)\n",
+ cpu, cache->ofnode->full_name,
+ cache_type_string(cache));
+
+ cpumask_clear_cpu(cpu, &cache->shared_cpu_map);
+
+ /* Release the cache object if all the cpus using it
+ * are offline */
+ if (cpumask_empty(&cache->shared_cpu_map))
+ release_cache(cache);
+
+ cache = next;
+ }
+}
+
+void cacheinfo_cpu_offline(unsigned int cpu_id)
+{
+ struct cache_dir *cache_dir;
+ struct cache *cache;
+
+ /* Prevent userspace from seeing inconsistent state - remove
+ * the sysfs hierarchy first */
+ cache_dir = per_cpu(cache_dir, cpu_id);
+
+ /* careful, sysfs population may have failed */
+ if (cache_dir)
+ remove_cache_dir(cache_dir);
+
+ per_cpu(cache_dir, cpu_id) = NULL;
+
+ /* clear the CPU's bit in its cache chain, possibly freeing
+ * cache objects */
+ cache = cache_lookup_by_cpu(cpu_id);
+ if (cache)
+ cache_cpu_clear(cache, cpu_id);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/powerpc/kernel/cacheinfo.h b/arch/powerpc/kernel/cacheinfo.h
new file mode 100644
index 000000000000..a7b74d36acd7
--- /dev/null
+++ b/arch/powerpc/kernel/cacheinfo.h
@@ -0,0 +1,8 @@
+#ifndef _PPC_CACHEINFO_H
+#define _PPC_CACHEINFO_H
+
+/* These are just hooks for sysfs.c to use. */
+extern void cacheinfo_cpu_online(unsigned int cpu_id);
+extern void cacheinfo_cpu_offline(unsigned int cpu_id);
+
+#endif /* _PPC_CACHEINFO_H */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 7e8719504f39..923f87aff20a 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -19,6 +19,7 @@
#include <asm/oprofile_impl.h>
#include <asm/cputable.h>
#include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */
+#include <asm/mmu.h>
struct cpu_spec* cur_cpu_spec = NULL;
EXPORT_SYMBOL(cur_cpu_spec);
@@ -94,6 +95,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER3 (630)",
.cpu_features = CPU_FTRS_POWER3,
.cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -109,6 +111,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER3 (630+)",
.cpu_features = CPU_FTRS_POWER3,
.cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -124,6 +127,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "RS64-II (northstar)",
.cpu_features = CPU_FTRS_RS64,
.cpu_user_features = COMMON_USER_PPC64,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -139,6 +143,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "RS64-III (pulsar)",
.cpu_features = CPU_FTRS_RS64,
.cpu_user_features = COMMON_USER_PPC64,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -154,6 +159,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "RS64-III (icestar)",
.cpu_features = CPU_FTRS_RS64,
.cpu_user_features = COMMON_USER_PPC64,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -169,6 +175,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "RS64-IV (sstar)",
.cpu_features = CPU_FTRS_RS64,
.cpu_user_features = COMMON_USER_PPC64,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -184,6 +191,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER4 (gp)",
.cpu_features = CPU_FTRS_POWER4,
.cpu_user_features = COMMON_USER_POWER4,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -199,6 +207,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER4+ (gq)",
.cpu_features = CPU_FTRS_POWER4,
.cpu_user_features = COMMON_USER_POWER4,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -215,6 +224,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -233,6 +243,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -251,6 +262,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -269,6 +281,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -287,6 +300,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -303,6 +317,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER5 (gr)",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -323,6 +338,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER5+ (gs)",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5_PLUS,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -339,6 +355,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER5+ (gs)",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5_PLUS,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -356,6 +373,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER5+",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5_PLUS,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.machine_check = machine_check_generic,
@@ -369,6 +387,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_POWER6,
.cpu_user_features = COMMON_USER_POWER6 |
PPC_FEATURE_POWER6_EXT,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -388,6 +407,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER6 (architected)",
.cpu_features = CPU_FTRS_POWER6,
.cpu_user_features = COMMON_USER_POWER6,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.machine_check = machine_check_generic,
@@ -400,6 +420,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7 (architected)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.machine_check = machine_check_generic,
@@ -412,6 +433,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7 (raw)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -434,6 +456,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_PPC64 |
PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP |
PPC_FEATURE_SMT,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 4,
@@ -449,6 +472,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "PA6T",
.cpu_features = CPU_FTRS_PA6T,
.cpu_user_features = COMMON_USER_PA6T,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 64,
.dcache_bsize = 64,
.num_pmcs = 6,
@@ -466,6 +490,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER4 (compatible)",
.cpu_features = CPU_FTRS_COMPATIBLE,
.cpu_user_features = COMMON_USER_PPC64,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -483,6 +508,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC601,
.cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR |
PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_generic,
@@ -494,6 +520,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "603",
.cpu_features = CPU_FTRS_603,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = 0,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -506,6 +533,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "603e",
.cpu_features = CPU_FTRS_603,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = 0,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -518,6 +546,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "603ev",
.cpu_features = CPU_FTRS_603,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = 0,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -530,6 +559,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "604",
.cpu_features = CPU_FTRS_604,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 2,
@@ -543,6 +573,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "604e",
.cpu_features = CPU_FTRS_604,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -556,6 +587,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "604r",
.cpu_features = CPU_FTRS_604,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -569,6 +601,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "604ev",
.cpu_features = CPU_FTRS_604,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -582,6 +615,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "740/750",
.cpu_features = CPU_FTRS_740_NOTAU,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -595,6 +629,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750CX",
.cpu_features = CPU_FTRS_750,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -608,6 +643,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750CX",
.cpu_features = CPU_FTRS_750,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -622,6 +658,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750CXe",
.cpu_features = CPU_FTRS_750,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -636,6 +673,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750CXe",
.cpu_features = CPU_FTRS_750,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -650,6 +688,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750CL",
.cpu_features = CPU_FTRS_750CL,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -664,6 +703,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "745/755",
.cpu_features = CPU_FTRS_750,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -678,6 +718,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750FX",
.cpu_features = CPU_FTRS_750FX1,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -692,6 +733,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750FX",
.cpu_features = CPU_FTRS_750FX2,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -706,6 +748,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750FX",
.cpu_features = CPU_FTRS_750FX,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -720,6 +763,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750GX",
.cpu_features = CPU_FTRS_750GX,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -734,6 +778,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "740/750",
.cpu_features = CPU_FTRS_740,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -749,6 +794,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7400_NOTAU,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -764,6 +810,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7400,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -779,6 +826,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7400,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -794,6 +842,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7450_20,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -811,6 +860,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7450_21,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -828,6 +878,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7450_23,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -845,6 +896,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7455_1,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -862,6 +914,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7455_20,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -879,6 +932,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7455,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -896,6 +950,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7447_10,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -913,6 +968,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7447_10,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -929,6 +985,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "7447/7457",
.cpu_features = CPU_FTRS_7447,
.cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -946,6 +1003,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7447A,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -963,6 +1021,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7448,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -979,6 +1038,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "82xx",
.cpu_features = CPU_FTRS_82XX,
.cpu_user_features = COMMON_USER,
+ .mmu_features = 0,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -991,6 +1051,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "G2_LE",
.cpu_features = CPU_FTRS_G2_LE,
.cpu_user_features = COMMON_USER,
+ .mmu_features = MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -1003,6 +1064,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "e300c1",
.cpu_features = CPU_FTRS_E300,
.cpu_user_features = COMMON_USER,
+ .mmu_features = MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -1015,6 +1077,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "e300c2",
.cpu_features = CPU_FTRS_E300C2,
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .mmu_features = MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -1027,6 +1090,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "e300c3",
.cpu_features = CPU_FTRS_E300,
.cpu_user_features = COMMON_USER,
+ .mmu_features = MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -1041,6 +1105,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "e300c4",
.cpu_features = CPU_FTRS_E300,
.cpu_user_features = COMMON_USER,
+ .mmu_features = MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -1056,6 +1121,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "(generic PPC)",
.cpu_features = CPU_FTRS_CLASSIC32,
.cpu_user_features = COMMON_USER,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_generic,
@@ -1071,6 +1137,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
* if the 8xx code is there.... */
.cpu_features = CPU_FTRS_8XX,
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .mmu_features = MMU_FTR_TYPE_8xx,
.icache_bsize = 16,
.dcache_bsize = 16,
.platform = "ppc823",
@@ -1083,6 +1150,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "403GC",
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 16,
.dcache_bsize = 16,
.machine_check = machine_check_4xx,
@@ -1095,6 +1163,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 16,
.dcache_bsize = 16,
.machine_check = machine_check_4xx,
@@ -1106,6 +1175,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "403G ??",
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 16,
.dcache_bsize = 16,
.machine_check = machine_check_4xx,
@@ -1118,6 +1188,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1130,6 +1201,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1142,6 +1214,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1154,6 +1227,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1166,6 +1240,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1178,6 +1253,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1190,6 +1266,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1202,6 +1279,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1213,6 +1291,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "405LP",
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1225,6 +1304,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1237,6 +1317,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1249,6 +1330,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1261,6 +1343,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1273,6 +1356,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1286,6 +1370,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1298,6 +1383,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1312,6 +1398,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GR Rev. A",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1323,6 +1410,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440EP Rev. A",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440ep,
@@ -1335,6 +1423,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GR Rev. B",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1346,6 +1435,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440EP Rev. C",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440ep,
@@ -1358,6 +1448,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440EP Rev. B",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440ep,
@@ -1370,6 +1461,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GRX",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440grx,
@@ -1382,6 +1474,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440EPX",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440epx,
@@ -1394,6 +1487,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GP Rev. B",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1405,6 +1499,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GP Rev. C",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1416,6 +1511,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GX Rev. A",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440gx,
@@ -1428,6 +1524,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GX Rev. B",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440gx,
@@ -1440,6 +1537,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GX Rev. C",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440gx,
@@ -1452,6 +1550,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GX Rev. F",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440gx,
@@ -1464,6 +1563,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440SP Rev. A",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1475,6 +1575,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440SPe Rev. A",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440spe,
@@ -1487,6 +1588,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440SPe Rev. B",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440spe,
@@ -1499,6 +1601,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440 in Virtex-5 FXT",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440x5,
@@ -1509,8 +1612,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pvr_mask = 0xffff0002,
.pvr_value = 0x13020002,
.cpu_name = "460EX",
- .cpu_features = CPU_FTRS_44X,
+ .cpu_features = CPU_FTRS_440x6,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_460ex,
@@ -1521,8 +1625,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pvr_mask = 0xffff0002,
.pvr_value = 0x13020000,
.cpu_name = "460GT",
- .cpu_features = CPU_FTRS_44X,
+ .cpu_features = CPU_FTRS_440x6,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_460gt,
@@ -1535,6 +1640,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "(generic 44x PPC)",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1551,6 +1657,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_EFP_SINGLE |
PPC_FEATURE_UNIFIED_CACHE,
+ .mmu_features = MMU_FTR_TYPE_FSL_E,
.dcache_bsize = 32,
.machine_check = machine_check_e200,
.platform = "ppc5554",
@@ -1565,6 +1672,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_SPE_COMP |
PPC_FEATURE_HAS_EFP_SINGLE_COMP |
PPC_FEATURE_UNIFIED_CACHE,
+ .mmu_features = MMU_FTR_TYPE_FSL_E,
.dcache_bsize = 32,
.machine_check = machine_check_e200,
.platform = "ppc5554",
@@ -1577,6 +1685,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_EFP_SINGLE |
PPC_FEATURE_UNIFIED_CACHE,
+ .mmu_features = MMU_FTR_TYPE_FSL_E,
.dcache_bsize = 32,
.machine_check = machine_check_e200,
.platform = "ppc5554",
@@ -1591,6 +1700,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_SPE_COMP |
PPC_FEATURE_HAS_EFP_SINGLE_COMP,
+ .mmu_features = MMU_FTR_TYPE_FSL_E,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -1608,6 +1718,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_SPE_COMP |
PPC_FEATURE_HAS_EFP_SINGLE_COMP |
PPC_FEATURE_HAS_EFP_DOUBLE_COMP,
+ .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -1622,6 +1733,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "e500mc",
.cpu_features = CPU_FTRS_E500MC,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS,
.icache_bsize = 64,
.dcache_bsize = 64,
.num_pmcs = 4,
@@ -1638,6 +1750,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_SPE_COMP |
PPC_FEATURE_HAS_EFP_SINGLE_COMP,
+ .mmu_features = MMU_FTR_TYPE_FSL_E,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_e500,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 3a6eaa876ee1..1c5c8a6fc129 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -120,6 +120,26 @@ static inline void dma_direct_unmap_page(struct device *dev,
{
}
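+/*
+ * On CONFIG_NOT_COHERENT_CACHE platforms the streaming-DMA sync hooks
+ * below flush/invalidate the CPU caches by hand via __dma_sync() and
+ * __dma_sync_page(); on cache-coherent hardware they are simply left
+ * unset in dma_direct_ops.
+ */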
+#ifdef CONFIG_NOT_COHERENT_CACHE
+static inline void dma_direct_sync_sg(struct device *dev,
+ struct scatterlist *sgl, int nents,
+ enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+}
+
+static inline void dma_direct_sync_single_range(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ __dma_sync(bus_to_virt(dma_handle+offset), size, direction);
+}
+#endif
+
struct dma_mapping_ops dma_direct_ops = {
.alloc_coherent = dma_direct_alloc_coherent,
.free_coherent = dma_direct_free_coherent,
@@ -128,5 +148,11 @@ struct dma_mapping_ops dma_direct_ops = {
.dma_supported = dma_direct_dma_supported,
.map_page = dma_direct_map_page,
.unmap_page = dma_direct_unmap_page,
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ .sync_single_range_for_cpu = dma_direct_sync_single_range,
+ .sync_single_range_for_device = dma_direct_sync_single_range,
+ .sync_sg_for_cpu = dma_direct_sync_sg,
+ .sync_sg_for_device = dma_direct_sync_sg,
+#endif
};
EXPORT_SYMBOL(dma_direct_ops);
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 7ecc0d1855c3..6f7eb7e00c79 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -1162,39 +1162,17 @@ machine_check_in_rtas:
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
- stwu r1,-48(r1)
- stw r3, 12(r1)
- stw r4, 16(r1)
- stw r5, 20(r1)
- stw r6, 24(r1)
- mflr r3
- stw r7, 28(r1)
- mfcr r5
- stw r8, 32(r1)
- stw r9, 36(r1)
- stw r10,40(r1)
- stw r3, 44(r1)
- stw r5, 8(r1)
- subi r3, r3, MCOUNT_INSN_SIZE
- .globl mcount_call
-mcount_call:
- bl ftrace_stub
- nop
- lwz r6, 8(r1)
- lwz r0, 44(r1)
- lwz r3, 12(r1)
+ /*
+ * On PPC32, _mcount must return with the link register holding the
+ * value the traced function's caller expects.  We only have r0 to
+ * play with, so: move the return address (back into the traced
+ * function) from LR into CTR, reload the original LR from its save
+ * slot at 4(r1), and jump back through CTR.
+ */
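+ /*
+ * (Assumed -pg call sequence on PPC32, for illustration:
+ *	mflr	r0
+ *	stw	r0, 4(r1)
+ *	bl	_mcount
+ * which is why the original LR can be reloaded from 4(r1).)
+ */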
+ mflr r0
mtctr r0
- lwz r4, 16(r1)
- mtcr r6
- lwz r5, 20(r1)
- lwz r6, 24(r1)
- lwz r0, 52(r1)
- lwz r7, 28(r1)
- lwz r8, 32(r1)
+ lwz r0, 4(r1)
mtlr r0
- lwz r9, 36(r1)
- lwz r10,40(r1)
- addi r1, r1, 48
bctr
_GLOBAL(ftrace_caller)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index e0bcf9354286..383ed6eb0085 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -894,18 +894,6 @@ _GLOBAL(enter_prom)
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
- /* Taken from output of objdump from lib64/glibc */
- mflr r3
- stdu r1, -112(r1)
- std r3, 128(r1)
- subi r3, r3, MCOUNT_INSN_SIZE
- .globl mcount_call
-mcount_call:
- bl ftrace_stub
- nop
- ld r0, 128(r1)
- mtlr r0
- addi r1, r1, 112
blr
_GLOBAL(ftrace_caller)
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index f4b006ed0ab1..5355244c99ff 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -9,22 +9,30 @@
#include <linux/spinlock.h>
#include <linux/hardirq.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>
#include <asm/cacheflush.h>
+#include <asm/code-patching.h>
#include <asm/ftrace.h>
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt , ...) do { } while (0)
+#endif
-static unsigned int ftrace_nop = 0x60000000;
+static unsigned int ftrace_nop = PPC_NOP_INSTR;
#ifdef CONFIG_PPC32
# define GET_ADDR(addr) addr
#else
/* PowerPC64's functions are data that points to the functions */
-# define GET_ADDR(addr) *(unsigned long *)addr
+# define GET_ADDR(addr) (*(unsigned long *)addr)
#endif
@@ -33,12 +41,12 @@ static unsigned int ftrace_calc_offset(long ip, long addr)
return (int)(addr - ip);
}
-unsigned char *ftrace_nop_replace(void)
+static unsigned char *ftrace_nop_replace(void)
{
return (char *)&ftrace_nop;
}
-unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
static unsigned int op;
@@ -68,49 +76,422 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
# define _ASM_PTR " .long "
#endif
-int
+static int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char *new_code)
{
- unsigned replaced;
- unsigned old = *(unsigned *)old_code;
- unsigned new = *(unsigned *)new_code;
- int faulted = 0;
+ unsigned char replaced[MCOUNT_INSN_SIZE];
/*
* Note: Due to modules and __init, code can
* disappear and change, we need to protect against faulting
- * as well as code changing.
+ * as well as code changing. We do this by using the
+ * probe_kernel_* functions.
*
* No real locking needed, this code is run through
- * kstop_machine.
+ * kstop_machine, or before SMP starts.
*/
- asm volatile (
- "1: lwz %1, 0(%2)\n"
- " cmpw %1, %5\n"
- " bne 2f\n"
- " stwu %3, 0(%2)\n"
- "2:\n"
- ".section .fixup, \"ax\"\n"
- "3: li %0, 1\n"
- " b 2b\n"
- ".previous\n"
- ".section __ex_table,\"a\"\n"
- _ASM_ALIGN "\n"
- _ASM_PTR "1b, 3b\n"
- ".previous"
- : "=r"(faulted), "=r"(replaced)
- : "r"(ip), "r"(new),
- "0"(faulted), "r"(old)
- : "memory");
-
- if (replaced != old && replaced != new)
- faulted = 2;
-
- if (!faulted)
- flush_icache_range(ip, ip + 8);
-
- return faulted;
+
+ /* read the text we want to modify */
+ if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ /* Make sure it is what we expect it to be */
+ if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+ return -EINVAL;
+
+ /* replace the text with the new text */
+ if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
+ return -EPERM;
+
+ flush_icache_range(ip, ip + 8);
+
+ return 0;
+}
+
+/*
+ * Helper functions that are the same for both PPC64 and PPC32.
+ */
+static int test_24bit_addr(unsigned long ip, unsigned long addr)
+{
+
+ /* use create_branch() to verify that addr is within 24-bit branch range of ip */
+ return create_branch((unsigned int *)ip, addr, 0);
+}
+
+static int is_bl_op(unsigned int op)
+{
+ return (op & 0xfc000003) == 0x48000001;
+}
+
+static unsigned long find_bl_target(unsigned long ip, unsigned int op)
+{
+ static int offset;
+
+ offset = (op & 0x03fffffc);
+ /* make it signed */
+ if (offset & 0x02000000)
+ offset |= 0xfe000000;
+
+ return ip + (long)offset;
+}
+
+#ifdef CONFIG_PPC64
+static int
+__ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int op;
+ unsigned int jmp[5];
+ unsigned long ptr;
+ unsigned long ip = rec->ip;
+ unsigned long tramp;
+ int offset;
+
+ /* read where this goes */
+ if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
+ return -EFAULT;
+
+ /* Make sure that this is still a 24-bit jump */
+ if (!is_bl_op(op)) {
+ printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
+ return -EINVAL;
+ }
+
+ /* let's find where the pointer goes */
+ tramp = find_bl_target(ip, op);
+
+ /*
+ * On PPC64 the trampoline looks like:
+ * 0x3d, 0x82, 0x00, 0x00, addis r12,r2, <high>
+ * 0x39, 0x8c, 0x00, 0x00, addi r12,r12, <low>
+ * Where bytes 2, 3, 6 and 7 make up the 32-bit offset
+ * into the TOC that holds the pointer
+ * to jump to.
+ * 0xf8, 0x41, 0x00, 0x28, std r2,40(r1)
+ * 0xe9, 0x6c, 0x00, 0x20, ld r11,32(r12)
+ * The actual address is 32 bytes from the offset
+ * into the TOC.
+ * 0xe8, 0x4c, 0x00, 0x28, ld r2,40(r12)
+ */
+
+ DEBUGP("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
+
+ /* Find where the trampoline jumps to */
+ if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
+ printk(KERN_ERR "Failed to read %lx\n", tramp);
+ return -EFAULT;
+ }
+
+ DEBUGP(" %08x %08x", jmp[0], jmp[1]);
+
+ /* verify that this is what we expect it to be */
+ if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
+ ((jmp[1] & 0xffff0000) != 0x398c0000) ||
+ (jmp[2] != 0xf8410028) ||
+ (jmp[3] != 0xe96c0020) ||
+ (jmp[4] != 0xe84c0028)) {
+ printk(KERN_ERR "Not a trampoline\n");
+ return -EINVAL;
+ }
+
+ offset = (unsigned)((unsigned short)jmp[0]) << 16 |
+ (unsigned)((unsigned short)jmp[1]);
+
+ DEBUGP(" %x ", offset);
+
+ /* get the address this jumps to */
+ tramp = mod->arch.toc + offset + 32;
+ DEBUGP("toc: %lx", tramp);
+
+ if (probe_kernel_read(jmp, (void *)tramp, 8)) {
+ printk(KERN_ERR "Failed to read %lx\n", tramp);
+ return -EFAULT;
+ }
+
+ DEBUGP(" %08x %08x\n", jmp[0], jmp[1]);
+
+ ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
+
+ /* This should match what was called */
+ if (ptr != GET_ADDR(addr)) {
+ printk(KERN_ERR "addr does not match %lx\n", ptr);
+ return -EINVAL;
+ }
+
+ /*
+ * We want to nop the line, but the next line is
+ * 0xe8, 0x41, 0x00, 0x28 ld r2,40(r1)
+ * This needs to be turned to a nop too.
+ */
+ if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ if (op != 0xe8410028) {
+ printk(KERN_ERR "Next line is not ld! (%08x)\n", op);
+ return -EINVAL;
+ }
+
+ /*
+ * Milton Miller pointed out that we cannot blindly do nops.
+ * If a task was preempted when calling a trace function,
+ * the nops will remove the way to restore the TOC in r2
+ * and the r2 TOC will get corrupted.
+ */
+
+ /*
+ * Replace:
+ * bl <tramp> <==== will be replaced with "b 1f"
+ * ld r2,40(r1)
+ * 1:
+ */
+ op = 0x48000008; /* b +8 */
+
+ if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
+ return -EPERM;
+
+
+ flush_icache_range(ip, ip + 8);
+
+ return 0;
+}
+
+#else /* !PPC64 */
+static int
+__ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int op;
+ unsigned int jmp[4];
+ unsigned long ip = rec->ip;
+ unsigned long tramp;
+
+ if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ /* Make sure that this is still a 24-bit jump */
+ if (!is_bl_op(op)) {
+ printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
+ return -EINVAL;
+ }
+
+ /* let's find where the pointer goes */
+ tramp = find_bl_target(ip, op);
+
+ /*
+ * On PPC32 the trampoline looks like:
+ * 0x3d, 0x60, 0x00, 0x00 lis r11,sym@ha
+ * 0x39, 0x6b, 0x00, 0x00 addi r11,r11,sym@l
+ * 0x7d, 0x69, 0x03, 0xa6 mtctr r11
+ * 0x4e, 0x80, 0x04, 0x20 bctr
+ */
+
+ DEBUGP("ip:%lx jumps to %lx", ip, tramp);
+
+ /* Find where the trampoline jumps to */
+ if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
+ printk(KERN_ERR "Failed to read %lx\n", tramp);
+ return -EFAULT;
+ }
+
+ DEBUGP(" %08x %08x ", jmp[0], jmp[1]);
+
+ /* verify that this is what we expect it to be */
+ if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
+ ((jmp[1] & 0xffff0000) != 0x396b0000) ||
+ (jmp[2] != 0x7d6903a6) ||
+ (jmp[3] != 0x4e800420)) {
+ printk(KERN_ERR "Not a trampoline\n");
+ return -EINVAL;
+ }
+
+ tramp = (jmp[1] & 0xffff) |
+ ((jmp[0] & 0xffff) << 16);
+ if (tramp & 0x8000)
+ tramp -= 0x10000;
+
+ DEBUGP(" %x ", tramp);
+
+ if (tramp != addr) {
+ printk(KERN_ERR
+ "Trampoline location %08lx does not match addr\n",
+ tramp);
+ return -EINVAL;
+ }
+
+ op = PPC_NOP_INSTR;
+
+ if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
+ return -EPERM;
+
+ flush_icache_range(ip, ip + 8);
+
+ return 0;
+}
+#endif /* PPC64 */
+
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned char *old, *new;
+ unsigned long ip = rec->ip;
+
+ /*
+ * If the calling address is more than 24 bits away,
+ * then we had to use a trampoline to make the call.
+ * Otherwise just update the call site.
+ */
+ if (test_24bit_addr(ip, addr)) {
+ /* within range */
+ old = ftrace_call_replace(ip, addr);
+ new = ftrace_nop_replace();
+ return ftrace_modify_code(ip, old, new);
+ }
+
+ /*
+ * Out of range jumps are called from modules.
+ * We should either already have a pointer to the module
+ * or it has been passed in.
+ */
+ if (!rec->arch.mod) {
+ if (!mod) {
+ printk(KERN_ERR "No module loaded addr=%lx\n",
+ addr);
+ return -EFAULT;
+ }
+ rec->arch.mod = mod;
+ } else if (mod) {
+ if (mod != rec->arch.mod) {
+ printk(KERN_ERR
+ "Record mod %p not equal to passed in mod %p\n",
+ rec->arch.mod, mod);
+ return -EINVAL;
+ }
+ /* nothing to do if mod == rec->arch.mod */
+ } else
+ mod = rec->arch.mod;
+
+ return __ftrace_make_nop(mod, rec, addr);
+
+}
+
+#ifdef CONFIG_PPC64
+static int
+__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int op[2];
+ unsigned long ip = rec->ip;
+
+ /* read where this goes */
+ if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2))
+ return -EFAULT;
+
+ /*
+ * It should be pointing to two nops or
+ * b +8; ld r2,40(r1)
+ */
+ if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
+ ((op[0] != PPC_NOP_INSTR) || (op[1] != PPC_NOP_INSTR))) {
+ printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
+ return -EINVAL;
+ }
+
+ /* If we never set up a trampoline to ftrace_caller, then bail */
+ if (!rec->arch.mod->arch.tramp) {
+ printk(KERN_ERR "No ftrace trampoline\n");
+ return -EINVAL;
+ }
+
+ /* create the branch to the trampoline */
+ op[0] = create_branch((unsigned int *)ip,
+ rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
+ if (!op[0]) {
+ printk(KERN_ERR "REL24 out of range!\n");
+ return -EINVAL;
+ }
+
+ /* ld r2,40(r1) */
+ op[1] = 0xe8410028;
+
+ DEBUGP("write to %lx\n", rec->ip);
+
+ if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
+ return -EPERM;
+
+ flush_icache_range(ip, ip + 8);
+
+ return 0;
+}
+#else
+static int
+__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int op;
+ unsigned long ip = rec->ip;
+
+ /* read where this goes */
+ if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ /* It should be pointing to a nop */
+ if (op != PPC_NOP_INSTR) {
+ printk(KERN_ERR "Expected NOP but have %x\n", op);
+ return -EINVAL;
+ }
+
+ /* If we never set up a trampoline to ftrace_caller, then bail */
+ if (!rec->arch.mod->arch.tramp) {
+ printk(KERN_ERR "No ftrace trampoline\n");
+ return -EINVAL;
+ }
+
+ /* create the branch to the trampoline */
+ op = create_branch((unsigned int *)ip,
+ rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
+ if (!op) {
+ printk(KERN_ERR "REL24 out of range!\n");
+ return -EINVAL;
+ }
+
+ DEBUGP("write to %lx\n", rec->ip);
+
+ if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
+ return -EPERM;
+
+ flush_icache_range(ip, ip + 8);
+
+ return 0;
+}
+#endif /* CONFIG_PPC64 */
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned char *old, *new;
+ unsigned long ip = rec->ip;
+
+ /*
+ * If the calling address is more than 24 bits away,
+ * then we had to use a trampoline to make the call.
+ * Otherwise just update the call site.
+ */
+ if (test_24bit_addr(ip, addr)) {
+ /* within range */
+ old = ftrace_nop_replace();
+ new = ftrace_call_replace(ip, addr);
+ return ftrace_modify_code(ip, old, new);
+ }
+
+ /*
+ * Out of range jumps are called from modules.
+ * Being that we are converting from nop, it had better
+ * already have a module defined.
+ */
+ if (!rec->arch.mod) {
+ printk(KERN_ERR "No module loaded\n");
+ return -EINVAL;
+ }
+
+ return __ftrace_make_call(rec, addr);
}
int ftrace_update_ftrace_func(ftrace_func_t func)
@@ -128,10 +509,10 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
int __init ftrace_dyn_arch_init(void *data)
{
- /* This is running in kstop_machine */
+ /* caller expects data to be zero */
+ unsigned long *p = data;
- ftrace_mcount_set(data);
+ *p = 0;
return 0;
}
-
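For readers unfamiliar with the PowerPC branch encoding used by is_bl_op() and find_bl_target() above: a bl is primary opcode 18 with the LK bit set, and its LI field is a signed, word-aligned 24-bit displacement. The small user-space sketch below mirrors the masks from the patch; decode_bl() is a hypothetical helper added only for illustration.

#include <stdio.h>
#include <stdint.h>

/* A bl instruction is primary opcode 18 (0x48xxxxxx) with the LK bit set. */
static int is_bl_op(uint32_t op)
{
        return (op & 0xfc000003) == 0x48000001;
}

/* Hypothetical helper: recover the branch target of a bl located at 'ip'. */
static unsigned long decode_bl(unsigned long ip, uint32_t op)
{
        long offset = op & 0x03fffffc;          /* LI field, already <<2 */

        if (offset & 0x02000000)                /* sign-extend the top bit */
                offset |= ~0x03ffffffL;
        return ip + offset;
}

int main(void)
{
        uint32_t op = 0x4bfffff1;               /* encodes "bl .-0x10" */
        unsigned long ip = 0x1000;

        if (is_bl_op(op))
                printf("branch target = %#lx\n", decode_bl(ip, op));
        return 0;
}

This same decoding is what lets __ftrace_make_nop() chase the bl into the module trampoline before validating and patching the call site.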
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 0c326823c6d4..a1c4cfd25ded 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -31,6 +31,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
+#include <asm/bug.h>
/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB) \
@@ -182,7 +183,8 @@ __after_mmu_off:
bl reloc_offset
mr r26,r3
addis r4,r3,KERNELBASE@h /* current address of _start */
- cmpwi 0,r4,0 /* are we already running at 0? */
+ lis r5,PHYSICAL_START@h
+ cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
bne relocate_kernel
/*
* we now have the 1st 16M of ram mapped with the bats.
@@ -810,13 +812,13 @@ giveup_altivec:
/*
* This code is jumped to from the startup code to copy
- * the kernel image to physical address 0.
+ * the kernel image to physical address PHYSICAL_START.
*/
relocate_kernel:
addis r9,r26,klimit@ha /* fetch klimit */
lwz r25,klimit@l(r9)
addis r25,r25,-KERNELBASE@h
- li r3,0 /* Destination base address */
+ lis r3,PHYSICAL_START@h /* Destination base address */
li r6,0 /* Destination offset */
li r5,0x4000 /* # bytes of memory to copy */
bl copy_and_flush /* copy the first 0x4000 bytes */
@@ -989,12 +991,12 @@ load_up_mmu:
LOAD_BAT(1,r3,r4,r5)
LOAD_BAT(2,r3,r4,r5)
LOAD_BAT(3,r3,r4,r5)
-BEGIN_FTR_SECTION
+BEGIN_MMU_FTR_SECTION
LOAD_BAT(4,r3,r4,r5)
LOAD_BAT(5,r3,r4,r5)
LOAD_BAT(6,r3,r4,r5)
LOAD_BAT(7,r3,r4,r5)
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blr
/*
@@ -1070,9 +1072,14 @@ start_here:
RFI
/*
+ * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
+ *
* Set up the segment registers for a new context.
*/
-_ENTRY(set_context)
+_ENTRY(switch_mmu_context)
+ lwz r3,MMCONTEXTID(r4)
+ cmpwi cr0,r3,0
+ blt- 4f
mulli r3,r3,897 /* multiply context by skew factor */
rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
addis r3,r3,0x6000 /* Set Ks, Ku bits */
@@ -1083,6 +1090,7 @@ _ENTRY(set_context)
/* Context switch the PTE pointer for the Abatron BDI2000.
* The PGDIR is passed as second argument.
*/
+ lwz r4,MM_PGD(r4)
lis r5, KERNELBASE@h
lwz r5, 0xf0(r5)
stw r4, 0x4(r5)
@@ -1098,6 +1106,9 @@ _ENTRY(set_context)
sync
isync
blr
+4: trap
+ EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
+ blr
/*
* An undocumented "feature" of 604e requires that the v bit
@@ -1131,7 +1142,7 @@ clear_bats:
mtspr SPRN_IBAT2L,r10
mtspr SPRN_IBAT3U,r10
mtspr SPRN_IBAT3L,r10
-BEGIN_FTR_SECTION
+BEGIN_MMU_FTR_SECTION
/* Here's a tweak: at this point, CPU setup has
* not been called yet, so HIGH_BAT_EN may not be
* set in HID0 for the 745x processors. However, it
@@ -1154,7 +1165,7 @@ BEGIN_FTR_SECTION
mtspr SPRN_IBAT6L,r10
mtspr SPRN_IBAT7U,r10
mtspr SPRN_IBAT7L,r10
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blr
flush_tlbs:
@@ -1178,11 +1189,11 @@ mmu_off:
/*
* Use the first pair of BAT registers to map the 1st 16MB
- * of RAM to KERNELBASE. From this point on we can't safely
+ * of RAM to PAGE_OFFSET. From this point on we can't safely
* call OF any more.
*/
initial_bats:
- lis r11,KERNELBASE@h
+ lis r11,PAGE_OFFSET@h
mfspr r9,SPRN_PVR
rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
cmpwi 0,r9,1
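The arithmetic behind the new switch_mmu_context() entry check and the VSID computation can be written out in C for clarity. This is only a sketch of what the mulli/rlwinm/addis sequence produces for segment register 0, using the 897 skew factor and the Ks/Ku bits visible in the assembly; whatever per-segment adjustment the rest of the routine applies is outside this hunk and omitted.

#include <stdio.h>
#include <stdint.h>

/* Sketch: value the assembly builds before storing segment registers.
 * VSID = ((context * 897) & 0xfffff) << 4, then Ks/Ku (0x6000xxxx) on top.
 */
static uint32_t segment0_value(uint32_t context)
{
        uint32_t vsid = ((context * 897) & 0xfffffu) << 4;

        return 0x60000000u + vsid;
}

int main(void)
{
        /* a negative (unallocated) context id would instead hit the trap */
        printf("SR base for context 1: 0x%08x\n", segment0_value(1));
        return 0;
}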
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index f3a1ea9d7fe4..b56fecc93a16 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -69,6 +69,17 @@ _ENTRY(_start);
li r24,0 /* CPU number */
/*
+ * In case the firmware didn't do it, we apply some workarounds
+ * that are good for all 440 core variants here
+ */
+ mfspr r3,SPRN_CCR0
+ rlwinm r3,r3,0,0,27 /* disable icache prefetch */
+ isync
+ mtspr SPRN_CCR0,r3
+ isync
+ sync
+
+/*
* Set up the initial MMU state
*
* We are still executing code at the virtual address
@@ -391,12 +402,14 @@ interrupt_base:
rlwimi r13,r12,10,30,30
/* Load the PTE */
- rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
+ /* Compute pgdir/pmd offset */
+ rlwinm r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
lwzx r11, r12, r11 /* Get pgd/pmd entry */
rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
beq 2f /* Bail if no table */
- rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
+ /* Compute pte address */
+ rlwimi r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
lwz r11, 0(r12) /* Get high word of pte entry */
lwz r12, 4(r12) /* Get low word of pte entry */
@@ -485,12 +498,14 @@ tlb_44x_patch_hwater_D:
/* Make up the required permissions */
li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC
- rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
+ /* Compute pgdir/pmd offset */
+ rlwinm r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
lwzx r11, r12, r11 /* Get pgd/pmd entry */
rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
beq 2f /* Bail if no table */
- rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
+ /* Compute pte address */
+ rlwimi r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
lwz r11, 0(r12) /* Get high word of pte entry */
lwz r12, 4(r12) /* Get low word of pte entry */
@@ -554,15 +569,16 @@ tlb_44x_patch_hwater_I:
*/
finish_tlb_load:
/* Combine RPN & ERPN and write WS 0 */
- rlwimi r11,r12,0,0,19
+ rlwimi r11,r12,0,0,31-PAGE_SHIFT
tlbwe r11,r13,PPC44x_TLB_XLAT
/*
* Create WS1. This is the faulting address (EPN),
* page size, and valid flag.
*/
- li r11,PPC44x_TLB_VALID | PPC44x_TLB_4K
- rlwimi r10,r11,0,20,31 /* Insert valid and page size*/
+ li r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
+ /* Insert valid and page size */
+ rlwimi r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
tlbwe r10,r13,PPC44x_TLB_PAGEID /* Write PAGEID */
/* And WS 2 */
@@ -634,12 +650,12 @@ _GLOBAL(set_context)
* goes at the beginning of the data segment, which is page-aligned.
*/
.data
- .align 12
+ .align PAGE_SHIFT
.globl sdata
sdata:
.globl empty_zero_page
empty_zero_page:
- .space 4096
+ .space PAGE_SIZE
/*
* To support >32-bit physical addresses, we use an 8KB pgdir.
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 590304c24dad..11b549acc034 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -92,6 +92,7 @@ _ENTRY(_start);
* if needed
*/
+_ENTRY(__early_start)
/* 1. Find the index of the entry we're executing in */
bl invstr /* Find our address */
invstr: mflr r6 /* Make it accessible */
@@ -235,36 +236,40 @@ skpinv: addi r6,r6,1 /* Increment */
tlbivax 0,r9
TLBSYNC
+/* The mapping only needs to be cache-coherent on SMP */
+#ifdef CONFIG_SMP
+#define M_IF_SMP MAS2_M
+#else
+#define M_IF_SMP 0
+#endif
+
/* 6. Setup KERNELBASE mapping in TLB1[0] */
lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
mtspr SPRN_MAS0,r6
lis r6,(MAS1_VALID|MAS1_IPROT)@h
ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_64M))@l
mtspr SPRN_MAS1,r6
- li r7,0
- lis r6,PAGE_OFFSET@h
- ori r6,r6,PAGE_OFFSET@l
- rlwimi r6,r7,0,20,31
+ lis r6,MAS2_VAL(PAGE_OFFSET, BOOKE_PAGESZ_64M, M_IF_SMP)@h
+ ori r6,r6,MAS2_VAL(PAGE_OFFSET, BOOKE_PAGESZ_64M, M_IF_SMP)@l
mtspr SPRN_MAS2,r6
mtspr SPRN_MAS3,r8
tlbwe
/* 7. Jump to KERNELBASE mapping */
- lis r6,KERNELBASE@h
- ori r6,r6,KERNELBASE@l
- rlwimi r6,r7,0,20,31
+ lis r6,(KERNELBASE & ~0xfff)@h
+ ori r6,r6,(KERNELBASE & ~0xfff)@l
lis r7,MSR_KERNEL@h
ori r7,r7,MSR_KERNEL@l
bl 1f /* Find our address */
1: mflr r9
rlwimi r6,r9,0,20,31
- addi r6,r6,24
+ addi r6,r6,(2f - 1b)
mtspr SPRN_SRR0,r6
mtspr SPRN_SRR1,r7
rfi /* start execution out of TLB1[0] entry */
/* 8. Clear out the temp mapping */
- lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
+2: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
mtspr SPRN_MAS0,r7
tlbre
@@ -344,6 +349,15 @@ skpinv: addi r6,r6,1 /* Increment */
mtspr SPRN_DBSR,r2
#endif
+#ifdef CONFIG_SMP
+ /* Check to see if we're the second processor, and jump
+ * to the secondary_start code if so
+ */
+ mfspr r24,SPRN_PIR
+ cmpwi r24,0
+ bne __secondary_start
+#endif
+
/*
* This is where the main kernel code starts.
*/
@@ -685,12 +699,13 @@ interrupt_base:
/* SPE Floating Point Data */
#ifdef CONFIG_SPE
EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
-#else
- EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
-#endif /* CONFIG_SPE */
/* SPE Floating Point Round */
+ EXCEPTION(0x2050, SPEFloatingPointRound, SPEFloatingPointRoundException, EXC_XFER_EE)
+#else
+ EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
+#endif /* CONFIG_SPE */
/* Performance Monitor */
EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
@@ -735,6 +750,9 @@ finish_tlb_load:
#else
rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */
#endif
+#ifdef CONFIG_SMP
+ ori r12, r12, MAS2_M
+#endif
mtspr SPRN_MAS2, r12
li r10, (_PAGE_HWEXEC | _PAGE_PRESENT)
@@ -746,15 +764,15 @@ finish_tlb_load:
iseleq r12, r12, r10
#ifdef CONFIG_PTE_64BIT
-2: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */
+ rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */
rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */
mtspr SPRN_MAS3, r12
-BEGIN_FTR_SECTION
+BEGIN_MMU_FTR_SECTION
srwi r10, r13, 8 /* grab RPN[8:31] */
mtspr SPRN_MAS7, r10
-END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
#else
-2: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */
+ rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */
mtspr SPRN_MAS3, r11
#endif
#ifdef CONFIG_E200
@@ -1037,6 +1055,63 @@ _GLOBAL(flush_dcache_L1)
blr
+#ifdef CONFIG_SMP
+/* When we get here, r24 needs to hold the CPU # */
+ .globl __secondary_start
+__secondary_start:
+ lis r3,__secondary_hold_acknowledge@h
+ ori r3,r3,__secondary_hold_acknowledge@l
+ stw r24,0(r3)
+
+ li r3,0
+ mr r4,r24 /* Why? */
+ bl call_setup_cpu
+
+ lis r3,tlbcam_index@ha
+ lwz r3,tlbcam_index@l(r3)
+ mtctr r3
+ li r26,0 /* r26 safe? */
+
+ /* Load each CAM entry */
+1: mr r3,r26
+ bl loadcam_entry
+ addi r26,r26,1
+ bdnz 1b
+
+ /* get current_thread_info and current */
+ lis r1,secondary_ti@ha
+ lwz r1,secondary_ti@l(r1)
+ lwz r2,TI_TASK(r1)
+
+ /* stack */
+ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ li r0,0
+ stw r0,0(r1)
+
+ /* ptr to current thread */
+ addi r4,r2,THREAD /* address of our thread_struct */
+ mtspr SPRN_SPRG3,r4
+
+ /* Setup the defaults for TLB entries */
+ li r4,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
+ mtspr SPRN_MAS4,r4
+
+ /* Jump to start_secondary */
+ lis r4,MSR_KERNEL@h
+ ori r4,r4,MSR_KERNEL@l
+ lis r3,start_secondary@h
+ ori r3,r3,start_secondary@l
+ mtspr SPRN_SRR0,r3
+ mtspr SPRN_SRR1,r4
+ sync
+ rfi
+ sync
+
+ .globl __secondary_hold_acknowledge
+__secondary_hold_acknowledge:
+ .long -1
+#endif
+
/*
* We put a few things here that have to be page-aligned. This stuff
* goes at the beginning of the data segment, which is page-aligned.
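The position-independent jump in step 7 of the FSL BookE startup (the bl 1f / mflr / rlwimi / addi sequence above) rebuilds its rfi destination from the KERNELBASE page, the low 12 bits of the current program counter, and the distance from label 1 to label 2. A rough C rendering of that arithmetic, purely as an illustration:

#include <stdio.h>

/* kernelbase, current_pc and label2_minus_label1 stand in for KERNELBASE,
 * the address captured by "bl 1f; mflr r9", and the "(2f - 1b)" distance.
 */
static unsigned long rfi_destination(unsigned long kernelbase,
                                     unsigned long current_pc,
                                     unsigned long label2_minus_label1)
{
        unsigned long dest = kernelbase & ~0xfffUL;  /* lis/ori of KERNELBASE & ~0xfff */

        dest |= current_pc & 0xfffUL;                /* rlwimi r6,r9,0,20,31 */
        dest += label2_minus_label1;                 /* addi r6,r6,(2f - 1b) */
        return dest;
}

int main(void)
{
        /* example numbers only */
        printf("%#lx\n", rfi_destination(0xc0000000UL, 0x00001a30UL, 0x14UL));
        return 0;
}

Using (2f - 1b) instead of the old hard-coded 24 keeps the computation correct if instructions are ever added or removed between the two labels.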
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 64299d28f364..6e3f62493659 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -47,7 +47,7 @@
#include <asm/abs_addr.h>
static struct device ibmebus_bus_device = { /* fake "parent" device */
- .bus_id = "ibmebus",
+ .init_name = "ibmebus",
};
struct bus_type ibmebus_bus_type;
@@ -231,6 +231,7 @@ void ibmebus_free_irq(u32 ist, void *dev_id)
unsigned int irq = irq_find_mapping(NULL, ist);
free_irq(irq, dev_id);
+ irq_dispose_mapping(irq);
}
EXPORT_SYMBOL(ibmebus_free_irq);
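The ibmebus change pairs the interrupt teardown with the virq mapping created at request time. The sketch below shows that pairing; the request-side body is a guess (only the free path appears in this hunk), and example_request()/example_free() are illustrative names, not the real ibmebus entry points.

#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/irq.h>

/* Guessed counterpart of the free path: map the hardware interrupt
 * source token to a Linux virq, then request it.
 */
static int example_request(u32 ist, irq_handler_t handler, void *dev_id)
{
        unsigned int irq = irq_create_mapping(NULL, ist);

        if (irq == NO_IRQ)
                return -EINVAL;
        return request_irq(irq, handler, 0, "ibmebus-example", dev_id);
}

/* Mirror of ibmebus_free_irq() above: undo both the request and the mapping. */
static void example_free(u32 ist, void *dev_id)
{
        unsigned int irq = irq_find_mapping(NULL, ist);

        free_irq(irq, dev_id);
        irq_dispose_mapping(irq);
}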
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 31982d05d81a..88d9c1d5e5fb 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -69,10 +69,15 @@ void cpu_idle(void)
smp_mb();
local_irq_disable();
+ /* Don't trace irqs off for idle */
+ stop_critical_timings();
+
/* check again after disabling irqs */
if (!need_resched() && !cpu_should_die())
ppc_md.power_save();
+ start_critical_timings();
+
local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c
index 4c85b8d56478..688b329800bd 100644
--- a/arch/powerpc/kernel/init_task.c
+++ b/arch/powerpc/kernel/init_task.c
@@ -7,7 +7,6 @@
#include <linux/mqueue.h>
#include <asm/uaccess.h>
-static struct fs_struct init_fs = INIT_FS;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ac222d0ab12e..23b8b5e36f98 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -237,7 +237,7 @@ void fixup_irqs(cpumask_t map)
mask = map;
}
if (irq_desc[irq].chip->set_affinity)
- irq_desc[irq].chip->set_affinity(irq, mask);
+ irq_desc[irq].chip->set_affinity(irq, &mask);
else if (irq_desc[irq].action && !(warned++))
printk("Cannot set affinity for irq %i\n", irq);
}
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index de79915452c8..c9329786073b 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -96,9 +96,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
- mutex_lock(&kprobe_mutex);
- free_insn_slot(p->ainsn.insn, 0);
- mutex_unlock(&kprobe_mutex);
+ if (p->ainsn.insn) {
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+ }
}
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -316,7 +317,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
/*
* It is possible to have multiple instances associated with a given
* task either because multiple functions in the call path
- * have a return probe installed on them, and/or more then one return
+ * have a return probe installed on them, and/or more than one return
* probe was registered for a target function.
*
* We can handle this because:
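The arch_remove_kprobe() change above turns the slot release into an idempotent operation: free only if the pointer is set, then clear it. The same pattern in isolation, with free_slot() standing in for free_insn_slot():

#include <stdlib.h>

struct insn_holder {
        void *insn;
};

/* stand-in for free_insn_slot(); any release function fits the pattern */
static void free_slot(void *slot)
{
        free(slot);
}

/* Release the slot at most once, however many times this is called. */
static void remove_probe(struct insn_holder *p)
{
        if (p->insn) {
                free_slot(p->insn);
                p->insn = NULL;
        }
}

int main(void)
{
        struct insn_holder p = { .insn = malloc(16) };

        remove_probe(&p);
        remove_probe(&p);       /* safe: pointer was cleared on the first call */
        return 0;
}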
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index ac2a21f45c75..b3abebb7ee64 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -13,13 +13,17 @@
#include <linux/reboot.h>
#include <linux/threads.h>
#include <linux/lmb.h>
+#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/prom.h>
+#include <asm/sections.h>
void machine_crash_shutdown(struct pt_regs *regs)
{
if (ppc_md.machine_crash_shutdown)
ppc_md.machine_crash_shutdown(regs);
+ else
+ default_machine_crash_shutdown(regs);
}
/*
@@ -31,11 +35,8 @@ int machine_kexec_prepare(struct kimage *image)
{
if (ppc_md.machine_kexec_prepare)
return ppc_md.machine_kexec_prepare(image);
- /*
- * Fail if platform doesn't provide its own machine_kexec_prepare
- * implementation.
- */
- return -ENOSYS;
+ else
+ return default_machine_kexec_prepare(image);
}
void machine_kexec_cleanup(struct kimage *image)
@@ -52,13 +53,11 @@ void machine_kexec(struct kimage *image)
{
if (ppc_md.machine_kexec)
ppc_md.machine_kexec(image);
- else {
- /*
- * Fall back to normal restart if platform doesn't provide
- * its own kexec function, and user insist to kexec...
- */
- machine_restart(NULL);
- }
+ else
+ default_machine_kexec(image);
+
+ /* Fall back to normal restart if we're still alive. */
+ machine_restart(NULL);
for(;;);
}
@@ -118,3 +117,71 @@ int overlaps_crashkernel(unsigned long start, unsigned long size)
{
return (start + size) > crashk_res.start && start <= crashk_res.end;
}
+
+/* Values we need to export to the second kernel via the device tree. */
+static unsigned long kernel_end;
+static unsigned long crashk_size;
+
+static struct property kernel_end_prop = {
+ .name = "linux,kernel-end",
+ .length = sizeof(unsigned long),
+ .value = &kernel_end,
+};
+
+static struct property crashk_base_prop = {
+ .name = "linux,crashkernel-base",
+ .length = sizeof(unsigned long),
+ .value = &crashk_res.start,
+};
+
+static struct property crashk_size_prop = {
+ .name = "linux,crashkernel-size",
+ .length = sizeof(unsigned long),
+ .value = &crashk_size,
+};
+
+static void __init export_crashk_values(struct device_node *node)
+{
+ struct property *prop;
+
+ /* There might be existing crash kernel properties, but we can't
+ * be sure what's in them, so remove them. */
+ prop = of_find_property(node, "linux,crashkernel-base", NULL);
+ if (prop)
+ prom_remove_property(node, prop);
+
+ prop = of_find_property(node, "linux,crashkernel-size", NULL);
+ if (prop)
+ prom_remove_property(node, prop);
+
+ if (crashk_res.start != 0) {
+ prom_add_property(node, &crashk_base_prop);
+ crashk_size = crashk_res.end - crashk_res.start + 1;
+ prom_add_property(node, &crashk_size_prop);
+ }
+}
+
+static int __init kexec_setup(void)
+{
+ struct device_node *node;
+ struct property *prop;
+
+ node = of_find_node_by_path("/chosen");
+ if (!node)
+ return -ENOENT;
+
+ /* remove any stale properties so ours can be found */
+ prop = of_find_property(node, kernel_end_prop.name, NULL);
+ if (prop)
+ prom_remove_property(node, prop);
+
+ /* information needed by userspace when using default_machine_kexec */
+ kernel_end = __pa(_end);
+ prom_add_property(node, &kernel_end_prop);
+
+ export_crashk_values(node);
+
+ of_node_put(node);
+ return 0;
+}
+late_initcall(kexec_setup);
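Once kexec_setup() has run, the values exported above sit under /chosen in the device tree, so kexec userspace can read them back, typically via /proc/device-tree. A small sketch, assuming that layout; the property width follows sizeof(unsigned long) in the running kernel (4 bytes on ppc32, 8 on ppc64), so the reader below accepts either.

#include <stdio.h>
#include <stdint.h>

/* Read a big-endian integer property exported by the kernel. */
static int read_dt_ulong(const char *path, uint64_t *val)
{
        unsigned char buf[8];
        size_t n, i;
        FILE *f = fopen(path, "rb");

        if (!f)
                return -1;
        n = fread(buf, 1, sizeof(buf), f);
        fclose(f);

        *val = 0;
        for (i = 0; i < n; i++)                 /* device-tree data is big-endian */
                *val = (*val << 8) | buf[i];
        return n ? 0 : -1;
}

int main(void)
{
        uint64_t base;
        const char *path = "/proc/device-tree/chosen/linux,crashkernel-base";

        if (read_dt_ulong(path, &base) == 0)
                printf("crashkernel base: %#llx\n", (unsigned long long)base);
        return 0;
}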
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 3c4ca046e854..49e705fcee6d 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -289,7 +289,7 @@ void default_machine_kexec(struct kimage *image)
}
/* Values we need to export to the second kernel via the device tree. */
-static unsigned long htab_base, kernel_end;
+static unsigned long htab_base;
static struct property htab_base_prop = {
.name = "linux,htab-base",
@@ -303,25 +303,20 @@ static struct property htab_size_prop = {
.value = &htab_size_bytes,
};
-static struct property kernel_end_prop = {
- .name = "linux,kernel-end",
- .length = sizeof(unsigned long),
- .value = &kernel_end,
-};
-
-static void __init export_htab_values(void)
+static int __init export_htab_values(void)
{
struct device_node *node;
struct property *prop;
+ /* On machines with no htab htab_address is NULL */
+ if (!htab_address)
+ return -ENODEV;
+
node = of_find_node_by_path("/chosen");
if (!node)
- return;
+ return -ENODEV;
/* remove any stale properties so ours can be found */
- prop = of_find_property(node, kernel_end_prop.name, NULL);
- if (prop)
- prom_remove_property(node, prop);
prop = of_find_property(node, htab_base_prop.name, NULL);
if (prop)
prom_remove_property(node, prop);
@@ -329,68 +324,11 @@ static void __init export_htab_values(void)
if (prop)
prom_remove_property(node, prop);
- /* information needed by userspace when using default_machine_kexec */
- kernel_end = __pa(_end);
- prom_add_property(node, &kernel_end_prop);
-
- /* On machines with no htab htab_address is NULL */
- if (NULL == htab_address)
- goto out;
-
htab_base = __pa(htab_address);
prom_add_property(node, &htab_base_prop);
prom_add_property(node, &htab_size_prop);
- out:
- of_node_put(node);
-}
-
-static struct property crashk_base_prop = {
- .name = "linux,crashkernel-base",
- .length = sizeof(unsigned long),
- .value = &crashk_res.start,
-};
-
-static unsigned long crashk_size;
-
-static struct property crashk_size_prop = {
- .name = "linux,crashkernel-size",
- .length = sizeof(unsigned long),
- .value = &crashk_size,
-};
-
-static void __init export_crashk_values(void)
-{
- struct device_node *node;
- struct property *prop;
-
- node = of_find_node_by_path("/chosen");
- if (!node)
- return;
-
- /* There might be existing crash kernel properties, but we can't
- * be sure what's in them, so remove them. */
- prop = of_find_property(node, "linux,crashkernel-base", NULL);
- if (prop)
- prom_remove_property(node, prop);
-
- prop = of_find_property(node, "linux,crashkernel-size", NULL);
- if (prop)
- prom_remove_property(node, prop);
-
- if (crashk_res.start != 0) {
- prom_add_property(node, &crashk_base_prop);
- crashk_size = crashk_res.end - crashk_res.start + 1;
- prom_add_property(node, &crashk_size_prop);
- }
-
of_node_put(node);
-}
-
-static int __init kexec_setup(void)
-{
- export_htab_values();
- export_crashk_values();
return 0;
}
-__initcall(kexec_setup);
+late_initcall(export_htab_values);
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 5c33bc14bd9f..15f28e0de78d 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -29,6 +29,7 @@
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>
+#include <asm/bug.h>
.text
@@ -271,231 +272,6 @@ _GLOBAL(real_writeb)
#endif /* CONFIG_40x */
-/*
- * Flush MMU TLB
- */
-#ifndef CONFIG_FSL_BOOKE
-_GLOBAL(_tlbil_all)
-_GLOBAL(_tlbil_pid)
-#endif
-_GLOBAL(_tlbia)
-#if defined(CONFIG_40x)
- sync /* Flush to memory before changing mapping */
- tlbia
- isync /* Flush shadow TLB */
-#elif defined(CONFIG_44x)
- li r3,0
- sync
-
- /* Load high watermark */
- lis r4,tlb_44x_hwater@ha
- lwz r5,tlb_44x_hwater@l(r4)
-
-1: tlbwe r3,r3,PPC44x_TLB_PAGEID
- addi r3,r3,1
- cmpw 0,r3,r5
- ble 1b
-
- isync
-#elif defined(CONFIG_FSL_BOOKE)
- /* Invalidate all entries in TLB0 */
- li r3, 0x04
- tlbivax 0,3
- /* Invalidate all entries in TLB1 */
- li r3, 0x0c
- tlbivax 0,3
- msync
-#ifdef CONFIG_SMP
- tlbsync
-#endif /* CONFIG_SMP */
-#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
-#if defined(CONFIG_SMP)
- rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
- lwz r8,TI_CPU(r8)
- oris r8,r8,10
- mfmsr r10
- SYNC
- rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
- rlwinm r0,r0,0,28,26 /* clear DR */
- mtmsr r0
- SYNC_601
- isync
- lis r9,mmu_hash_lock@h
- ori r9,r9,mmu_hash_lock@l
- tophys(r9,r9)
-10: lwarx r7,0,r9
- cmpwi 0,r7,0
- bne- 10b
- stwcx. r8,0,r9
- bne- 10b
- sync
- tlbia
- sync
- TLBSYNC
- li r0,0
- stw r0,0(r9) /* clear mmu_hash_lock */
- mtmsr r10
- SYNC_601
- isync
-#else /* CONFIG_SMP */
- sync
- tlbia
- sync
-#endif /* CONFIG_SMP */
-#endif /* ! defined(CONFIG_40x) */
- blr
-
-/*
- * Flush MMU TLB for a particular address
- */
-#ifndef CONFIG_FSL_BOOKE
-_GLOBAL(_tlbil_va)
-#endif
-_GLOBAL(_tlbie)
-#if defined(CONFIG_40x)
- /* We run the search with interrupts disabled because we have to change
- * the PID and I don't want to preempt when that happens.
- */
- mfmsr r5
- mfspr r6,SPRN_PID
- wrteei 0
- mtspr SPRN_PID,r4
- tlbsx. r3, 0, r3
- mtspr SPRN_PID,r6
- wrtee r5
- bne 10f
- sync
- /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
- * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
- * the TLB entry. */
- tlbwe r3, r3, TLB_TAG
- isync
-10:
-
-#elif defined(CONFIG_44x)
- mfspr r5,SPRN_MMUCR
- rlwimi r5,r4,0,24,31 /* Set TID */
-
- /* We have to run the search with interrupts disabled, even critical
- * and debug interrupts (in fact the only critical exceptions we have
- * are debug and machine check). Otherwise an interrupt which causes
- * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
- mfmsr r4
- lis r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
- addi r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
- andc r6,r4,r6
- mtmsr r6
- mtspr SPRN_MMUCR,r5
- tlbsx. r3, 0, r3
- mtmsr r4
- bne 10f
- sync
- /* There are only 64 TLB entries, so r3 < 64,
- * which means bit 22, is clear. Since 22 is
- * the V bit in the TLB_PAGEID, loading this
- * value will invalidate the TLB entry.
- */
- tlbwe r3, r3, PPC44x_TLB_PAGEID
- isync
-10:
-#elif defined(CONFIG_FSL_BOOKE)
- rlwinm r4, r3, 0, 0, 19
- ori r5, r4, 0x08 /* TLBSEL = 1 */
- tlbivax 0, r4
- tlbivax 0, r5
- msync
-#if defined(CONFIG_SMP)
- tlbsync
-#endif /* CONFIG_SMP */
-#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
-#if defined(CONFIG_SMP)
- rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
- lwz r8,TI_CPU(r8)
- oris r8,r8,11
- mfmsr r10
- SYNC
- rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
- rlwinm r0,r0,0,28,26 /* clear DR */
- mtmsr r0
- SYNC_601
- isync
- lis r9,mmu_hash_lock@h
- ori r9,r9,mmu_hash_lock@l
- tophys(r9,r9)
-10: lwarx r7,0,r9
- cmpwi 0,r7,0
- bne- 10b
- stwcx. r8,0,r9
- bne- 10b
- eieio
- tlbie r3
- sync
- TLBSYNC
- li r0,0
- stw r0,0(r9) /* clear mmu_hash_lock */
- mtmsr r10
- SYNC_601
- isync
-#else /* CONFIG_SMP */
- tlbie r3
- sync
-#endif /* CONFIG_SMP */
-#endif /* ! CONFIG_40x */
- blr
-
-#if defined(CONFIG_FSL_BOOKE)
-/*
- * Flush MMU TLB, but only on the local processor (no broadcast)
- */
-_GLOBAL(_tlbil_all)
-#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
- MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
- li r3,(MMUCSR0_TLBFI)@l
- mtspr SPRN_MMUCSR0, r3
-1:
- mfspr r3,SPRN_MMUCSR0
- andi. r3,r3,MMUCSR0_TLBFI@l
- bne 1b
- blr
-
-/*
- * Flush MMU TLB for a particular process id, but only on the local processor
- * (no broadcast)
- */
-_GLOBAL(_tlbil_pid)
-/* we currently do an invalidate all since we don't have per pid invalidate */
- li r3,(MMUCSR0_TLBFI)@l
- mtspr SPRN_MMUCSR0, r3
-1:
- mfspr r3,SPRN_MMUCSR0
- andi. r3,r3,MMUCSR0_TLBFI@l
- bne 1b
- msync
- isync
- blr
-
-/*
- * Flush MMU TLB for a particular address, but only on the local processor
- * (no broadcast)
- */
-_GLOBAL(_tlbil_va)
- mfmsr r10
- wrteei 0
- slwi r4,r4,16
- mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
- tlbsx 0,r3
- mfspr r4,SPRN_MAS1 /* check valid */
- andis. r3,r4,MAS1_VALID@h
- beq 1f
- rlwinm r4,r4,0,1,31
- mtspr SPRN_MAS1,r4
- tlbwe
- msync
- isync
-1: wrtee r10
- blr
-#endif /* CONFIG_FSL_BOOKE */
-
/*
* Flush instruction cache.
@@ -650,8 +426,8 @@ _GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
- rlwinm r3,r3,0,0,19 /* Get page base address */
- li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
+ rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */
+ li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */
mtctr r4
mr r6,r3
0: dcbst 0,r3 /* Write line to ram */
@@ -691,8 +467,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
rlwinm r0,r10,0,28,26 /* clear DR */
mtmsr r0
isync
- rlwinm r3,r3,0,0,19 /* Get page base address */
- li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
+ rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */
+ li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */
mtctr r4
mr r6,r3
0: dcbst 0,r3 /* Write line to ram */
@@ -716,7 +492,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
* void clear_pages(void *page, int order) ;
*/
_GLOBAL(clear_pages)
- li r0,4096/L1_CACHE_BYTES
+ li r0,PAGE_SIZE/L1_CACHE_BYTES
slw r0,r0,r4
mtctr r0
#ifdef CONFIG_8xx
@@ -774,7 +550,7 @@ _GLOBAL(copy_page)
dcbt r5,r4
li r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
- li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
+ li r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
crclr 4*cr0+eq
2:
mtctr r0
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 7ff292475269..43e7e3a7f130 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -78,6 +78,12 @@ int module_finalize(const Elf_Ehdr *hdr,
(void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size);
+ sect = find_section(hdr, sechdrs, "__mmu_ftr_fixup");
+ if (sect != NULL)
+ do_feature_fixups(cur_cpu_spec->mmu_features,
+ (void *)sect->sh_addr,
+ (void *)sect->sh_addr + sect->sh_size);
+
#ifdef CONFIG_PPC64
sect = find_section(hdr, sechdrs, "__fw_ftr_fixup");
if (sect != NULL)
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 2df91a03462a..f832773fc28e 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -22,6 +22,7 @@
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/ftrace.h>
#include <linux/cache.h>
#include <linux/bug.h>
#include <linux/sort.h>
@@ -53,6 +54,9 @@ static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
r_addend = rela[i].r_addend;
}
+#ifdef CONFIG_DYNAMIC_FTRACE
+ _count_relocs++; /* add one for ftrace_caller */
+#endif
return _count_relocs;
}
@@ -306,5 +310,11 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
return -ENOEXEC;
}
}
+#ifdef CONFIG_DYNAMIC_FTRACE
+ module->arch.tramp =
+ do_plt_call(module->module_core,
+ (unsigned long)ftrace_caller,
+ sechdrs, module);
+#endif
return 0;
}
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 1af2377e4992..8992b031a7b6 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -20,6 +20,7 @@
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
+#include <linux/ftrace.h>
#include <linux/bug.h>
#include <asm/module.h>
#include <asm/firmware.h>
@@ -163,6 +164,11 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
}
}
+#ifdef CONFIG_DYNAMIC_FTRACE
+ /* make the trampoline to the ftrace_caller */
+ relocs++;
+#endif
+
DEBUGP("Looks like a total of %lu stubs, max\n", relocs);
return relocs * sizeof(struct ppc64_stub_entry);
}
@@ -441,5 +447,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
}
}
+#ifdef CONFIG_DYNAMIC_FTRACE
+ me->arch.toc = my_r2(sechdrs, me);
+ me->arch.tramp = stub_for_addr(sechdrs,
+ (unsigned long)ftrace_caller,
+ me);
+#endif
+
return 0;
}
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index f3c9cae01dd5..fa983a59c4ce 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -14,7 +14,6 @@ static void of_device_make_bus_id(struct of_device *dev)
{
static atomic_t bus_no_reg_magic;
struct device_node *node = dev->node;
- char *name = dev->dev.bus_id;
const u32 *reg;
u64 addr;
int magic;
@@ -27,14 +26,12 @@ static void of_device_make_bus_id(struct of_device *dev)
reg = of_get_property(node, "dcr-reg", NULL);
if (reg) {
#ifdef CONFIG_PPC_DCR_NATIVE
- snprintf(name, BUS_ID_SIZE, "d%x.%s",
- *reg, node->name);
+ dev_set_name(&dev->dev, "d%x.%s", *reg, node->name);
#else /* CONFIG_PPC_DCR_NATIVE */
addr = of_translate_dcr_address(node, *reg, NULL);
if (addr != OF_BAD_ADDR) {
- snprintf(name, BUS_ID_SIZE,
- "D%llx.%s", (unsigned long long)addr,
- node->name);
+ dev_set_name(&dev->dev, "D%llx.%s",
+ (unsigned long long)addr, node->name);
return;
}
#endif /* !CONFIG_PPC_DCR_NATIVE */
@@ -48,9 +45,8 @@ static void of_device_make_bus_id(struct of_device *dev)
if (reg) {
addr = of_translate_address(node, reg);
if (addr != OF_BAD_ADDR) {
- snprintf(name, BUS_ID_SIZE,
- "%llx.%s", (unsigned long long)addr,
- node->name);
+ dev_set_name(&dev->dev, "%llx.%s",
+ (unsigned long long)addr, node->name);
return;
}
}
@@ -60,7 +56,7 @@ static void of_device_make_bus_id(struct of_device *dev)
* counter (and pray...)
*/
magic = atomic_add_return(1, &bus_no_reg_magic);
- snprintf(name, BUS_ID_SIZE, "%s.%d", node->name, magic - 1);
+ dev_set_name(&dev->dev, "%s.%d", node->name, magic - 1);
}
struct of_device *of_device_alloc(struct device_node *np,
@@ -80,7 +76,7 @@ struct of_device *of_device_alloc(struct device_node *np,
dev->dev.archdata.of_node = np;
if (bus_id)
- strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
+ dev_set_name(&dev->dev, bus_id);
else
of_device_make_bus_id(dev);
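The of_device change above drops the snprintf() calls into the fixed-size bus_id[] array in favour of dev_set_name(), which takes a printf-style format and stores the name behind the struct device. A minimal kernel-flavoured sketch of the call pattern (the surrounding function is hypothetical):

#include <linux/device.h>

/* Sketch only: naming a device the way of_device_make_bus_id() now does. */
static void name_example_device(struct device *dev, u64 addr,
                                const char *node_name)
{
        /* dev_set_name() formats and stores the name for us */
        dev_set_name(dev, "%llx.%s", (unsigned long long)addr, node_name);
}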
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 48a347133f41..c744b327bcab 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -37,6 +37,7 @@ struct lppaca lppaca[] = {
.end_of_quantum = 0xfffffffffffffffful,
.slb_count = 64,
.vmxregs_in_use = 0,
+ .page_ins = 0,
},
};
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index f36936d9fda3..da5a3855a0c4 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -16,7 +16,7 @@
* 2 of the License, or (at your option) any later version.
*/
-#undef DEBUG
+#define DEBUG
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -37,13 +37,7 @@
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
-
-#ifdef DEBUG
-#include <asm/udbg.h>
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG(fmt...)
-#endif
+#include <asm/eeh.h>
static DEFINE_SPINLOCK(hose_spinlock);
@@ -53,8 +47,9 @@ static int global_phb_number; /* Global phb counter */
/* ISA Memory physical address */
resource_size_t isa_mem_base;
-/* Default PCI flags is 0 */
-unsigned int ppc_pci_flags;
+/* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */
+unsigned int ppc_pci_flags = 0;
+
static struct dma_mapping_ops *pci_dma_ops;
@@ -165,8 +160,6 @@ EXPORT_SYMBOL(pci_domain_nr);
*/
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
{
- if (!have_of)
- return NULL;
while(node) {
struct pci_controller *hose, *tmp;
list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
@@ -208,26 +201,6 @@ char __devinit *pcibios_setup(char *str)
return str;
}
-void __devinit pcibios_setup_new_device(struct pci_dev *dev)
-{
- struct dev_archdata *sd = &dev->dev.archdata;
-
- sd->of_node = pci_device_to_OF_node(dev);
-
- DBG("PCI: device %s OF node: %s\n", pci_name(dev),
- sd->of_node ? sd->of_node->full_name : "<none>");
-
- sd->dma_ops = pci_dma_ops;
-#ifdef CONFIG_PPC32
- sd->dma_data = (void *)PCI_DRAM_OFFSET;
-#endif
- set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
-
- if (ppc_md.pci_dma_dev_setup)
- ppc_md.pci_dma_dev_setup(dev);
-}
-EXPORT_SYMBOL(pcibios_setup_new_device);
-
/*
* Reads the interrupt pin to determine if interrupt is use by card.
* If the interrupt is used, then gets the interrupt line from the
@@ -252,7 +225,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
return -1;
#endif
- DBG("Try to map irq for %s...\n", pci_name(pci_dev));
+ pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
#ifdef DEBUG
memset(&oirq, 0xff, sizeof(oirq));
@@ -276,26 +249,26 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
line == 0xff || line == 0) {
return -1;
}
- DBG(" -> no map ! Using line %d (pin %d) from PCI config\n",
- line, pin);
+ pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
+ line, pin);
virq = irq_create_mapping(NULL, line);
if (virq != NO_IRQ)
set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
} else {
- DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
- oirq.size, oirq.specifier[0], oirq.specifier[1],
+ pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
+ oirq.size, oirq.specifier[0], oirq.specifier[1],
oirq.controller->full_name);
virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
}
if(virq == NO_IRQ) {
- DBG(" -> failed to map !\n");
+ pr_debug(" Failed to map !\n");
return -1;
}
- DBG(" -> mapped to linux irq %d\n", virq);
+ pr_debug(" Mapped to linux irq %d\n", virq);
pci_dev->irq = virq;
@@ -397,13 +370,10 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
}
/* XXX would be nice to have a way to ask for write-through */
- prot |= _PAGE_NO_CACHE;
if (write_combine)
- prot &= ~_PAGE_GUARDED;
+ return pgprot_noncached_wc(prot);
else
- prot |= _PAGE_GUARDED;
-
- return __pgprot(prot);
+ return pgprot_noncached(prot);
}
/*
@@ -414,19 +384,17 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
pgprot_t pci_phys_mem_access_prot(struct file *file,
unsigned long pfn,
unsigned long size,
- pgprot_t protection)
+ pgprot_t prot)
{
struct pci_dev *pdev = NULL;
struct resource *found = NULL;
- unsigned long prot = pgprot_val(protection);
resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
int i;
if (page_is_ram(pfn))
- return __pgprot(prot);
-
- prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
+ return prot;
+ prot = pgprot_noncached(prot);
for_each_pci_dev(pdev) {
for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
struct resource *rp = &pdev->resource[i];
@@ -447,14 +415,14 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
}
if (found) {
if (found->flags & IORESOURCE_PREFETCH)
- prot &= ~_PAGE_GUARDED;
+ prot = pgprot_noncached_wc(prot);
pci_dev_put(pdev);
}
- DBG("non-PCI map for %llx, prot: %lx\n",
- (unsigned long long)offset, prot);
+ pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
+ (unsigned long long)offset, pgprot_val(prot));
- return __pgprot(prot);
+ return prot;
}
@@ -610,8 +578,7 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus,
pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
vma->vm_pgoff = offset >> PAGE_SHIFT;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
@@ -853,15 +820,12 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
int pci_proc_domain(struct pci_bus *bus)
{
struct pci_controller *hose = pci_bus_to_host(bus);
-#ifdef CONFIG_PPC64
- return hose->buid != 0;
-#else
+
if (!(ppc_pci_flags & PPC_PCI_ENABLE_PROC_DOMAINS))
return 0;
if (ppc_pci_flags & PPC_PCI_COMPAT_DOMAIN_0)
return hose->global_number != 0;
return 1;
-#endif
}
void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
@@ -1083,27 +1047,50 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
}
}
-static void __devinit __pcibios_fixup_bus(struct pci_bus *bus)
+void __devinit pcibios_setup_bus_self(struct pci_bus *bus)
{
- struct pci_dev *dev = bus->self;
-
- pr_debug("PCI: Fixup bus %d (%s)\n", bus->number, dev ? pci_name(dev) : "PHB");
-
- /* Fixup PCI<->PCI bridges. Host bridges are handled separately, for
- * now differently between 32 and 64 bits.
- */
- if (dev != NULL)
+ /* Fix up the bus resources for P2P bridges */
+ if (bus->self != NULL)
pcibios_fixup_bridge(bus);
- /* Additional setup that is different between 32 and 64 bits for now */
- pcibios_do_bus_setup(bus);
-
- /* Platform specific bus fixups */
+ /* Platform specific bus fixups. This is currently only used
+ * by fsl_pci and I'm hoping to get rid of it at some point
+ */
if (ppc_md.pcibios_fixup_bus)
ppc_md.pcibios_fixup_bus(bus);
- /* Read default IRQs and fixup if necessary */
+ /* Setup bus DMA mappings */
+ if (ppc_md.pci_dma_bus_setup)
+ ppc_md.pci_dma_bus_setup(bus);
+}
+
+void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ pr_debug("PCI: Fixup bus devices %d (%s)\n",
+ bus->number, bus->self ? pci_name(bus->self) : "PHB");
+
list_for_each_entry(dev, &bus->devices, bus_list) {
+ struct dev_archdata *sd = &dev->dev.archdata;
+
+ /* Setup OF node pointer in archdata */
+ sd->of_node = pci_device_to_OF_node(dev);
+
+ /* Fixup NUMA node as it may not be setup yet by the generic
+ * code and is needed by the DMA init
+ */
+ set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
+
+ /* Hook up default DMA ops */
+ sd->dma_ops = pci_dma_ops;
+ sd->dma_data = (void *)PCI_DRAM_OFFSET;
+
+ /* Additional platform DMA/iommu setup */
+ if (ppc_md.pci_dma_dev_setup)
+ ppc_md.pci_dma_dev_setup(dev);
+
+ /* Read default IRQs and fixup if necessary */
pci_read_irq_line(dev);
if (ppc_md.pci_irq_fixup)
ppc_md.pci_irq_fixup(dev);
@@ -1113,22 +1100,19 @@ static void __devinit __pcibios_fixup_bus(struct pci_bus *bus)
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
/* When called from the generic PCI probe, read PCI<->PCI bridge
- * bases before proceeding
+ * bases. This is -not- called when generating the PCI tree from
+ * the OF device-tree.
*/
if (bus->self != NULL)
pci_read_bridge_bases(bus);
- __pcibios_fixup_bus(bus);
-}
-EXPORT_SYMBOL(pcibios_fixup_bus);
-/* When building a bus from the OF tree rather than probing, we need a
- * slightly different version of the fixup which doesn't read the
- * bridge bases using config space accesses
- */
-void __devinit pcibios_fixup_of_probed_bus(struct pci_bus *bus)
-{
- __pcibios_fixup_bus(bus);
+ /* Now fixup the bus itself */
+ pcibios_setup_bus_self(bus);
+
+ /* Now fixup devices on that bus */
+ pcibios_setup_bus_devices(bus);
}
+EXPORT_SYMBOL(pcibios_fixup_bus);
static int skip_isa_ioresource_align(struct pci_dev *dev)
{
@@ -1198,10 +1182,10 @@ static int __init reparent_resources(struct resource *parent,
*pp = NULL;
for (p = res->child; p != NULL; p = p->sibling) {
p->parent = res;
- DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
- p->name,
- (unsigned long long)p->start,
- (unsigned long long)p->end, res->name);
+ pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
+ p->name,
+ (unsigned long long)p->start,
+ (unsigned long long)p->end, res->name);
}
return 0;
}
@@ -1245,9 +1229,12 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
int i;
struct resource *res, *pr;
+ pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
+ pci_domain_nr(bus), bus->number);
+
for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
if ((res = bus->resource[i]) == NULL || !res->flags
- || res->start > res->end)
+ || res->start > res->end || res->parent)
continue;
if (bus->parent == NULL)
pr = (res->flags & IORESOURCE_IO) ?
@@ -1271,14 +1258,14 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
}
}
- DBG("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
- "[0x%x], parent %p (%s)\n",
- bus->self ? pci_name(bus->self) : "PHB",
- bus->number, i,
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- (unsigned int)res->flags,
- pr, (pr && pr->name) ? pr->name : "nil");
+ pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
+ "[0x%x], parent %p (%s)\n",
+ bus->self ? pci_name(bus->self) : "PHB",
+ bus->number, i,
+ (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ (unsigned int)res->flags,
+ pr, (pr && pr->name) ? pr->name : "nil");
if (pr && !(pr->flags & IORESOURCE_UNSET)) {
if (request_resource(pr, res) == 0)
@@ -1305,11 +1292,11 @@ static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
{
struct resource *pr, *r = &dev->resource[idx];
- DBG("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
- pci_name(dev), idx,
- (unsigned long long)r->start,
- (unsigned long long)r->end,
- (unsigned int)r->flags);
+ pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
+ pci_name(dev), idx,
+ (unsigned long long)r->start,
+ (unsigned long long)r->end,
+ (unsigned int)r->flags);
pr = pci_find_parent_resource(dev, r);
if (!pr || (pr->flags & IORESOURCE_UNSET) ||
@@ -1317,10 +1304,11 @@ static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
" of device %s, will remap\n", idx, pci_name(dev));
if (pr)
- DBG("PCI: parent is %p: %016llx-%016llx [%x]\n", pr,
- (unsigned long long)pr->start,
- (unsigned long long)pr->end,
- (unsigned int)pr->flags);
+ pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n",
+ pr,
+ (unsigned long long)pr->start,
+ (unsigned long long)pr->end,
+ (unsigned int)pr->flags);
/* We'll assign a new address later */
r->flags |= IORESOURCE_UNSET;
r->end -= r->start;
@@ -1358,7 +1346,8 @@ static void __init pcibios_allocate_resources(int pass)
* but keep it unregistered.
*/
u32 reg;
- DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
+ pr_debug("PCI: Switching off ROM of %s\n",
+ pci_name(dev));
r->flags &= ~IORESOURCE_ROM_ENABLE;
pci_read_config_dword(dev, dev->rom_base_reg, &reg);
pci_write_config_dword(dev, dev->rom_base_reg,
@@ -1367,6 +1356,63 @@ static void __init pcibios_allocate_resources(int pass)
}
}
+static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ resource_size_t offset;
+ struct resource *res, *pres;
+ int i;
+
+ pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));
+
+ /* Check for IO */
+ if (!(hose->io_resource.flags & IORESOURCE_IO))
+ goto no_io;
+ offset = (unsigned long)hose->io_base_virt - _IO_BASE;
+ res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+ BUG_ON(res == NULL);
+ res->name = "Legacy IO";
+ res->flags = IORESOURCE_IO;
+ res->start = offset;
+ res->end = (offset + 0xfff) & 0xfffffffful;
+ pr_debug("Candidate legacy IO: %pR\n", res);
+ if (request_resource(&hose->io_resource, res)) {
+ printk(KERN_DEBUG
+ "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
+ pci_domain_nr(bus), bus->number, res);
+ kfree(res);
+ }
+
+ no_io:
+ /* Check for memory */
+ offset = hose->pci_mem_offset;
+ pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
+ for (i = 0; i < 3; i++) {
+ pres = &hose->mem_resources[i];
+ if (!(pres->flags & IORESOURCE_MEM))
+ continue;
+ pr_debug("hose mem res: %pR\n", pres);
+ if ((pres->start - offset) <= 0xa0000 &&
+ (pres->end - offset) >= 0xbffff)
+ break;
+ }
+ if (i >= 3)
+ return;
+ res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+ BUG_ON(res == NULL);
+ res->name = "Legacy VGA memory";
+ res->flags = IORESOURCE_MEM;
+ res->start = 0xa0000 + offset;
+ res->end = 0xbffff + offset;
+ pr_debug("Candidate VGA memory: %pR\n", res);
+ if (request_resource(pres, res)) {
+ printk(KERN_DEBUG
+ "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
+ pci_domain_nr(bus), bus->number, res);
+ kfree(res);
+ }
+}
+
void __init pcibios_resource_survey(void)
{
struct pci_bus *b;
@@ -1382,8 +1428,20 @@ void __init pcibios_resource_survey(void)
pcibios_allocate_resources(1);
}
+ /* Before we start assigning unassigned resources, we try to reserve
+ * the low IO area and the VGA memory area if they intersect the
+ * bus's available resources, to avoid allocating things on top of them
+ */
if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
- DBG("PCI: Assigning unassigned resouces...\n");
+ list_for_each_entry(b, &pci_root_buses, node)
+ pcibios_reserve_legacy_regions(b);
+ }
+
+ /* Now, if the platform didn't decide to blindly trust the firmware,
+ * we proceed to assigning things that were left unassigned
+ */
+ if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
+ pr_debug("PCI: Assigning unassigned resources...\n");
pci_assign_unassigned_resources();
}
@@ -1393,9 +1451,11 @@ void __init pcibios_resource_survey(void)
}
#ifdef CONFIG_HOTPLUG
-/* This is used by the pSeries hotplug driver to allocate resource
+
+/* This is used by the PCI hotplug driver to allocate resources
* of newly plugged busses. We can try to consolidate with the
- * rest of the code later, for now, keep it as-is
+ * rest of the code later, for now, keep it as-is as our main
+ * resource allocation function doesn't deal with sub-trees yet.
*/
void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
{
@@ -1410,6 +1470,14 @@ void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
if (r->parent || !r->start || !r->flags)
continue;
+
+ pr_debug("PCI: Claiming %s: "
+ "Resource %d: %016llx..%016llx [%x]\n",
+ pci_name(dev), i,
+ (unsigned long long)r->start,
+ (unsigned long long)r->end,
+ (unsigned int)r->flags);
+
pci_claim_resource(dev, i);
}
}
@@ -1418,6 +1486,31 @@ void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
pcibios_claim_one_bus(child_bus);
}
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
+
+
+/* pcibios_finish_adding_to_bus
+ *
+ * This is to be called by the hotplug code after devices have been
+ * added to a bus; this includes calling it for a PHB that is just
+ * being added
+ */
+void pcibios_finish_adding_to_bus(struct pci_bus *bus)
+{
+ pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
+ pci_domain_nr(bus), bus->number);
+
+ /* Allocate bus and devices resources */
+ pcibios_allocate_bus_resources(bus);
+ pcibios_claim_one_bus(bus);
+
+ /* Add new devices to global lists. Register in proc, sysfs. */
+ pci_bus_add_devices(bus);
+
+ /* Fixup EEH */
+ eeh_add_device_tree_late(bus);
+}
+EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
+
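A minimal usage sketch of the two new hotplug entry points; the caller, device node and bus are hypothetical, and the sequence simply mirrors the comment above:

static void example_hotplug_add_bridge(struct device_node *dn,
                                       struct pci_bus *bus)
{
        /* Pick up the newly added children from the device tree */
        of_rescan_bus(dn, bus);

        /* Allocate bus/device resources, register devices, fix up EEH */
        pcibios_finish_adding_to_bus(bus);
}
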
#endif /* CONFIG_HOTPLUG */
int pcibios_enable_device(struct pci_dev *dev, int mask)
@@ -1428,3 +1521,61 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
return pci_enable_resources(dev, mask);
}
+
+void __devinit pcibios_setup_phb_resources(struct pci_controller *hose)
+{
+ struct pci_bus *bus = hose->bus;
+ struct resource *res;
+ int i;
+
+ /* Hookup PHB IO resource */
+ bus->resource[0] = res = &hose->io_resource;
+
+ if (!res->flags) {
+ printk(KERN_WARNING "PCI: I/O resource not set for host"
+ " bridge %s (domain %d)\n",
+ hose->dn->full_name, hose->global_number);
+#ifdef CONFIG_PPC32
+ /* Workaround for lack of IO resource only on 32-bit */
+ res->start = (unsigned long)hose->io_base_virt - isa_io_base;
+ res->end = res->start + IO_SPACE_LIMIT;
+ res->flags = IORESOURCE_IO;
+#endif /* CONFIG_PPC32 */
+ }
+
+ pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ (unsigned long)res->flags);
+
+ /* Hookup PHB Memory resources */
+ for (i = 0; i < 3; ++i) {
+ res = &hose->mem_resources[i];
+ if (!res->flags) {
+ if (i > 0)
+ continue;
+ printk(KERN_ERR "PCI: Memory resource 0 not set for "
+ "host bridge %s (domain %d)\n",
+ hose->dn->full_name, hose->global_number);
+#ifdef CONFIG_PPC32
+ /* Workaround for lack of MEM resource only on 32-bit */
+ res->start = hose->pci_mem_offset;
+ res->end = (resource_size_t)-1LL;
+ res->flags = IORESOURCE_MEM;
+#endif /* CONFIG_PPC32 */
+ }
+ bus->resource[i+1] = res;
+
+ pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", i,
+ (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ (unsigned long)res->flags);
+ }
+
+ pr_debug("PCI: PHB MEM offset = %016llx\n",
+ (unsigned long long)hose->pci_mem_offset);
+ pr_debug("PCI: PHB IO offset = %08lx\n",
+ (unsigned long)hose->io_base_virt - _IO_BASE);
+
+}
+
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 131b1dfa68c6..132cd80afa21 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -26,12 +26,6 @@
#undef DEBUG
-#ifdef DEBUG
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
-
unsigned long isa_io_base = 0;
unsigned long pci_dram_offset = 0;
int pcibios_assign_bus_offset = 1;
@@ -272,17 +266,14 @@ pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
{
struct device_node *parent, *np;
- if (!have_of)
- return NULL;
-
- DBG("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
+ pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
parent = scan_OF_for_pci_bus(bus);
if (parent == NULL)
return NULL;
- DBG(" parent is %s\n", parent ? parent->full_name : "<NULL>");
+ pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
np = scan_OF_for_pci_dev(parent, devfn);
of_node_put(parent);
- DBG(" result is %s\n", np ? np->full_name : "<NULL>");
+ pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
/* XXX most callers don't release the returned node
* mostly because ppc64 doesn't increase the refcount,
@@ -315,8 +306,6 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
struct pci_controller* hose;
struct pci_dev* dev = NULL;
- if (!have_of)
- return -ENODEV;
/* Make sure it's really a PCI device */
hose = pci_find_hose_for_OF_device(node);
if (!hose || !hose->dn)
@@ -379,10 +368,41 @@ void pcibios_make_OF_bus_map(void)
}
#endif /* CONFIG_PPC_OF */
+static void __devinit pcibios_scan_phb(struct pci_controller *hose)
+{
+ struct pci_bus *bus;
+ struct device_node *node = hose->dn;
+ unsigned long io_offset;
+ struct resource *res = &hose->io_resource;
+
+ pr_debug("PCI: Scanning PHB %s\n",
+ node ? node->full_name : "<NO NAME>");
+
+ /* Create an empty bus for the toplevel */
+ bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
+ if (bus == NULL) {
+ printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
+ hose->global_number);
+ return;
+ }
+ bus->secondary = hose->first_busno;
+ hose->bus = bus;
+
+ /* Fixup IO space offset */
+ io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
+ res->start = (res->start + io_offset) & 0xffffffffu;
+ res->end = (res->end + io_offset) & 0xffffffffu;
+
+ /* Wire up PHB bus resources */
+ pcibios_setup_phb_resources(hose);
+
+ /* Scan children */
+ hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
+}
+
static int __init pcibios_init(void)
{
struct pci_controller *hose, *tmp;
- struct pci_bus *bus;
int next_busno = 0;
printk(KERN_INFO "PCI: Probing PCI hardware\n");
@@ -395,12 +415,8 @@ static int __init pcibios_init(void)
if (pci_assign_all_buses)
hose->first_busno = next_busno;
hose->last_busno = 0xff;
- bus = pci_scan_bus_parented(hose->parent, hose->first_busno,
- hose->ops, hose);
- if (bus) {
- pci_bus_add_devices(bus);
- hose->last_busno = bus->subordinate;
- }
+ pcibios_scan_phb(hose);
+ pci_bus_add_devices(hose->bus);
if (pci_assign_all_buses || next_busno <= hose->last_busno)
next_busno = hose->last_busno + pcibios_assign_bus_offset;
}
@@ -410,7 +426,7 @@ static int __init pcibios_init(void)
* numbers vs. kernel bus numbers since we may have to
* remap them.
*/
- if (pci_assign_all_buses && have_of)
+ if (pci_assign_all_buses)
pcibios_make_OF_bus_map();
/* Call common code to handle resource allocation */
@@ -425,54 +441,6 @@ static int __init pcibios_init(void)
subsys_initcall(pcibios_init);
-void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
-{
- struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
- unsigned long io_offset;
- struct resource *res;
- int i;
- struct pci_dev *dev;
-
- /* Hookup PHB resources */
- io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
- if (bus->parent == NULL) {
- /* This is a host bridge - fill in its resources */
- hose->bus = bus;
-
- bus->resource[0] = res = &hose->io_resource;
- if (!res->flags) {
- if (io_offset)
- printk(KERN_ERR "I/O resource not set for host"
- " bridge %d\n", hose->global_number);
- res->start = 0;
- res->end = IO_SPACE_LIMIT;
- res->flags = IORESOURCE_IO;
- }
- res->start = (res->start + io_offset) & 0xffffffffu;
- res->end = (res->end + io_offset) & 0xffffffffu;
-
- for (i = 0; i < 3; ++i) {
- res = &hose->mem_resources[i];
- if (!res->flags) {
- if (i > 0)
- continue;
- printk(KERN_ERR "Memory resource not set for "
- "host bridge %d\n", hose->global_number);
- res->start = hose->pci_mem_offset;
- res->end = ~0U;
- res->flags = IORESOURCE_MEM;
- }
- bus->resource[i+1] = res;
- }
- }
-
- if (ppc_md.pci_dma_bus_setup)
- ppc_md.pci_dma_bus_setup(bus);
-
- list_for_each_entry(dev, &bus->devices, bus_list)
- pcibios_setup_new_device(dev);
-}
-
/* the next one is stolen from the alpha port... */
void __init
pcibios_update_irq(struct pci_dev *dev, int irq)
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 3502b9101e6b..586962f65c2a 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -32,13 +32,6 @@
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
-#ifdef DEBUG
-#include <asm/udbg.h>
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
unsigned long pci_probe_only = 1;
/* pci_io_base -- the base address from which io bars are offsets.
@@ -102,7 +95,7 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
addrs = of_get_property(node, "assigned-addresses", &proplen);
if (!addrs)
return;
- DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
+ pr_debug(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
for (; proplen >= 20; proplen -= 20, addrs += 5) {
flags = pci_parse_of_flags(addrs[0]);
if (!flags)
@@ -112,8 +105,9 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
if (!size)
continue;
i = addrs[0] & 0xff;
- DBG(" base: %llx, size: %llx, i: %x\n",
- (unsigned long long)base, (unsigned long long)size, i);
+ pr_debug(" base: %llx, size: %llx, i: %x\n",
+ (unsigned long long)base,
+ (unsigned long long)size, i);
if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
@@ -144,7 +138,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
if (type == NULL)
type = "";
- DBG(" create device, devfn: %x, type: %s\n", devfn, type);
+ pr_debug(" create device, devfn: %x, type: %s\n", devfn, type);
dev->bus = bus;
dev->sysdata = node;
@@ -165,8 +159,8 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
dev->class = get_int_prop(node, "class-code", 0);
dev->revision = get_int_prop(node, "revision-id", 0);
- DBG(" class: 0x%x\n", dev->class);
- DBG(" revision: 0x%x\n", dev->revision);
+ pr_debug(" class: 0x%x\n", dev->class);
+ pr_debug(" revision: 0x%x\n", dev->revision);
dev->current_state = 4; /* unknown power state */
dev->error_state = pci_channel_io_normal;
@@ -187,7 +181,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
pci_parse_of_addrs(node, dev);
- DBG(" adding to system ...\n");
+ pr_debug(" adding to system ...\n");
pci_device_add(dev, bus);
@@ -195,19 +189,20 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
}
EXPORT_SYMBOL(of_create_pci_dev);
-void __devinit of_scan_bus(struct device_node *node,
- struct pci_bus *bus)
+static void __devinit __of_scan_bus(struct device_node *node,
+ struct pci_bus *bus, int rescan_existing)
{
struct device_node *child;
const u32 *reg;
int reglen, devfn;
struct pci_dev *dev;
- DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number);
+ pr_debug("of_scan_bus(%s) bus no %d... \n",
+ node->full_name, bus->number);
/* Scan direct children */
for_each_child_of_node(node, child) {
- DBG(" * %s\n", child->full_name);
+ pr_debug(" * %s\n", child->full_name);
reg = of_get_property(child, "reg", &reglen);
if (reg == NULL || reglen < 20)
continue;
@@ -217,11 +212,15 @@ void __devinit of_scan_bus(struct device_node *node,
dev = of_create_pci_dev(child, bus, devfn);
if (!dev)
continue;
- DBG(" dev header type: %x\n", dev->hdr_type);
+ pr_debug(" dev header type: %x\n", dev->hdr_type);
}
- /* Ally all fixups */
- pcibios_fixup_of_probed_bus(bus);
+ /* Apply all fixups necessary. We don't fixup the bus "self"
+ * for an existing bridge that is being rescanned
+ */
+ if (!rescan_existing)
+ pcibios_setup_bus_self(bus);
+ pcibios_setup_bus_devices(bus);
/* Now scan child busses */
list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -233,7 +232,20 @@ void __devinit of_scan_bus(struct device_node *node,
}
}
}
-EXPORT_SYMBOL(of_scan_bus);
+
+void __devinit of_scan_bus(struct device_node *node,
+ struct pci_bus *bus)
+{
+ __of_scan_bus(node, bus, 0);
+}
+EXPORT_SYMBOL_GPL(of_scan_bus);
+
+void __devinit of_rescan_bus(struct device_node *node,
+ struct pci_bus *bus)
+{
+ __of_scan_bus(node, bus, 1);
+}
+EXPORT_SYMBOL_GPL(of_rescan_bus);
void __devinit of_scan_pci_bridge(struct device_node *node,
struct pci_dev *dev)
@@ -245,7 +257,7 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
unsigned int flags;
u64 size;
- DBG("of_scan_pci_bridge(%s)\n", node->full_name);
+ pr_debug("of_scan_pci_bridge(%s)\n", node->full_name);
/* parse bus-range property */
busrange = of_get_property(node, "bus-range", &len);
@@ -309,12 +321,12 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
}
sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
bus->number);
- DBG(" bus name: %s\n", bus->name);
+ pr_debug(" bus name: %s\n", bus->name);
mode = PCI_PROBE_NORMAL;
if (ppc_md.pci_probe_mode)
mode = ppc_md.pci_probe_mode(bus);
- DBG(" probe mode: %d\n", mode);
+ pr_debug(" probe mode: %d\n", mode);
if (mode == PCI_PROBE_DEVTREE)
of_scan_bus(node, bus);
@@ -327,9 +339,10 @@ void __devinit scan_phb(struct pci_controller *hose)
{
struct pci_bus *bus;
struct device_node *node = hose->dn;
- int i, mode;
+ int mode;
- DBG("PCI: Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
+ pr_debug("PCI: Scanning PHB %s\n",
+ node ? node->full_name : "<NO NAME>");
/* Create an empty bus for the toplevel */
bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
@@ -345,26 +358,13 @@ void __devinit scan_phb(struct pci_controller *hose)
pcibios_map_io_space(bus);
/* Wire up PHB bus resources */
- DBG("PCI: PHB IO resource = %016lx-%016lx [%lx]\n",
- hose->io_resource.start, hose->io_resource.end,
- hose->io_resource.flags);
- bus->resource[0] = &hose->io_resource;
- for (i = 0; i < 3; ++i) {
- DBG("PCI: PHB MEM resource %d = %016lx-%016lx [%lx]\n", i,
- hose->mem_resources[i].start,
- hose->mem_resources[i].end,
- hose->mem_resources[i].flags);
- bus->resource[i+1] = &hose->mem_resources[i];
- }
- DBG("PCI: PHB MEM offset = %016lx\n", hose->pci_mem_offset);
- DBG("PCI: PHB IO offset = %08lx\n",
- (unsigned long)hose->io_base_virt - _IO_BASE);
+ pcibios_setup_phb_resources(hose);
/* Get probe mode and perform scan */
mode = PCI_PROBE_NORMAL;
if (node && ppc_md.pci_probe_mode)
mode = ppc_md.pci_probe_mode(bus);
- DBG(" probe mode: %d\n", mode);
+ pr_debug(" probe mode: %d\n", mode);
if (mode == PCI_PROBE_DEVTREE) {
bus->subordinate = hose->last_busno;
of_scan_bus(node, bus);
@@ -380,7 +380,7 @@ static int __init pcibios_init(void)
printk(KERN_INFO "PCI: Probing PCI hardware\n");
- /* For now, override phys_mem_access_prot. If we need it,
+ /* For now, override phys_mem_access_prot. If we need it,
* later, we may move that initialization to each ppc_md
*/
ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
@@ -388,6 +388,11 @@ static int __init pcibios_init(void)
if (pci_probe_only)
ppc_pci_flags |= PPC_PCI_PROBE_ONLY;
+ /* On ppc64, we always enable PCI domains and we keep domain 0
+ * backward compatible in /proc for video cards
+ */
+ ppc_pci_flags |= PPC_PCI_ENABLE_PROC_DOMAINS | PPC_PCI_COMPAT_DOMAIN_0;
+
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
scan_phb(hose);
@@ -422,8 +427,8 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
if (bus->self) {
struct resource *res = bus->resource[0];
- DBG("IO unmapping for PCI-PCI bridge %s\n",
- pci_name(bus->self));
+ pr_debug("IO unmapping for PCI-PCI bridge %s\n",
+ pci_name(bus->self));
__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
res->end + _IO_BASE + 1);
@@ -437,8 +442,8 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
if (hose->io_base_alloc == 0)
return 0;
- DBG("IO unmapping for PHB %s\n", hose->dn->full_name);
- DBG(" alloc=0x%p\n", hose->io_base_alloc);
+ pr_debug("IO unmapping for PHB %s\n", hose->dn->full_name);
+ pr_debug(" alloc=0x%p\n", hose->io_base_alloc);
/* This is a PHB, we fully unmap the IO area */
vunmap(hose->io_base_alloc);
@@ -463,11 +468,11 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
* thus HPTEs will be faulted in when needed
*/
if (bus->self) {
- DBG("IO mapping for PCI-PCI bridge %s\n",
- pci_name(bus->self));
- DBG(" virt=0x%016lx...0x%016lx\n",
- bus->resource[0]->start + _IO_BASE,
- bus->resource[0]->end + _IO_BASE);
+ pr_debug("IO mapping for PCI-PCI bridge %s\n",
+ pci_name(bus->self));
+ pr_debug(" virt=0x%016lx...0x%016lx\n",
+ bus->resource[0]->start + _IO_BASE,
+ bus->resource[0]->end + _IO_BASE);
return 0;
}
@@ -496,11 +501,11 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
hose->io_base_virt = (void __iomem *)(area->addr +
hose->io_base_phys - phys_page);
- DBG("IO mapping for PHB %s\n", hose->dn->full_name);
- DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
- hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
- DBG(" size=0x%016lx (alloc=0x%016lx)\n",
- hose->pci_io_size, size_page);
+ pr_debug("IO mapping for PHB %s\n", hose->dn->full_name);
+ pr_debug(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
+ hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
+ pr_debug(" size=0x%016lx (alloc=0x%016lx)\n",
+ hose->pci_io_size, size_page);
/* Establish the mapping */
if (__ioremap_at(phys_page, area->addr, size_page,
@@ -512,24 +517,13 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
hose->io_resource.start += io_virt_offset;
hose->io_resource.end += io_virt_offset;
- DBG(" hose->io_resource=0x%016lx...0x%016lx\n",
- hose->io_resource.start, hose->io_resource.end);
+ pr_debug(" hose->io_resource=0x%016lx...0x%016lx\n",
+ hose->io_resource.start, hose->io_resource.end);
return 0;
}
EXPORT_SYMBOL_GPL(pcibios_map_io_space);
-void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- if (ppc_md.pci_dma_bus_setup)
- ppc_md.pci_dma_bus_setup(bus);
-
- list_for_each_entry(dev, &bus->devices, bus_list)
- pcibios_setup_new_device(dev);
-}
-
unsigned long pci_address_to_pio(phys_addr_t address)
{
struct pci_controller *hose, *tmp;
@@ -566,9 +560,14 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
* G5 machines... So when something asks for bus 0 io base
* (bus 0 is HT root), we return the AGP one instead.
*/
- if (machine_is_compatible("MacRISC4"))
- if (in_bus == 0)
+ if (in_bus == 0 && machine_is_compatible("MacRISC4")) {
+ struct device_node *agp;
+
+ agp = of_find_compatible_node(NULL, NULL, "u3-agp");
+ if (agp)
in_bus = 0xf0;
+ of_node_put(agp);
+ }
/* That syscall isn't quite compatible with PCI domains, but it's
* used on pre-domains setup. We return the first match
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 260089dccfb0..c8b27bb4dbde 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -116,12 +116,6 @@ EXPORT_SYMBOL(giveup_spe);
#ifndef CONFIG_PPC64
EXPORT_SYMBOL(flush_instruction_cache);
-EXPORT_SYMBOL(flush_tlb_kernel_range);
-EXPORT_SYMBOL(flush_tlb_page);
-EXPORT_SYMBOL(_tlbie);
-#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
-EXPORT_SYMBOL(_tlbil_va);
-#endif
#endif
EXPORT_SYMBOL(__flush_icache_range);
EXPORT_SYMBOL(flush_dcache_range);
@@ -171,11 +165,11 @@ EXPORT_SYMBOL(timer_interrupt);
EXPORT_SYMBOL(irq_desc);
EXPORT_SYMBOL(tb_ticks_per_jiffy);
EXPORT_SYMBOL(cacheable_memcpy);
+EXPORT_SYMBOL(cacheable_memzero);
#endif
#ifdef CONFIG_PPC32
-EXPORT_SYMBOL(next_mmu_context);
-EXPORT_SYMBOL(set_context);
+EXPORT_SYMBOL(switch_mmu_context);
#endif
#ifdef CONFIG_PPC_STD_MMU_32
diff --git a/arch/powerpc/xmon/setjmp.S b/arch/powerpc/kernel/ppc_save_regs.S
index 04c0b305ad4a..5113bd2285e1 100644
--- a/arch/powerpc/xmon/setjmp.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -22,7 +22,7 @@
* that will be different for 32-bit and 64-bit, because of the
* different ABIs, though).
*/
-_GLOBAL(xmon_save_regs)
+_GLOBAL(ppc_save_regs)
PPC_STL r0,0*SZL(r3)
PPC_STL r2,2*SZL(r3)
PPC_STL r3,3*SZL(r3)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 957bded0020d..fb7049c054c0 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -33,6 +33,7 @@
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
+#include <linux/kernel_stat.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -467,6 +468,8 @@ static struct regbit {
{MSR_VEC, "VEC"},
{MSR_VSX, "VSX"},
{MSR_ME, "ME"},
+ {MSR_CE, "CE"},
+ {MSR_DE, "DE"},
{MSR_IR, "IR"},
{MSR_DR, "DR"},
{0, NULL}
@@ -998,7 +1001,7 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}
-static int kstack_depth_to_print = 64;
+static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 3a2dc7e6586a..c09cffafb6ee 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -824,11 +824,11 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
#endif
#ifdef CONFIG_KEXEC
- lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
+ lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
if (lprop)
crashk_res.start = *lprop;
- lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
+ lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
if (lprop)
crashk_res.end = crashk_res.start + *lprop - 1;
#endif
@@ -893,12 +893,12 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
u64 base, size, lmb_size;
unsigned int is_kexec_kdump = 0, rngs;
- ls = (cell_t *)of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
+ ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
return 0;
lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
- dm = (cell_t *)of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
+ dm = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
if (dm == NULL || l < sizeof(cell_t))
return 0;
@@ -907,7 +907,7 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
return 0;
/* check if this is a kexec/kdump kernel. */
- usm = (cell_t *)of_get_flat_dt_prop(node, "linux,drconf-usable-memory",
+ usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory",
&l);
if (usm != NULL)
is_kexec_kdump = 1;
@@ -981,9 +981,9 @@ static int __init early_init_dt_scan_memory(unsigned long node,
} else if (strcmp(type, "memory") != 0)
return 0;
- reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
+ reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
if (reg == NULL)
- reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
+ reg = of_get_flat_dt_prop(node, "reg", &l);
if (reg == NULL)
return 0;
@@ -1160,6 +1160,8 @@ static inline void __init phyp_dump_reserve_mem(void) {}
void __init early_init_devtree(void *params)
{
+ unsigned long limit;
+
DBG(" -> early_init_devtree(%p)\n", params);
/* Setup flat device-tree pointer */
@@ -1200,7 +1202,19 @@ void __init early_init_devtree(void *params)
early_reserve_mem();
phyp_dump_reserve_mem();
- lmb_enforce_memory_limit(memory_limit);
+ limit = memory_limit;
+ if (!limit) {
+ unsigned long memsize;
+
+ /* Ensure that total memory size is page-aligned, because
+ * otherwise mark_bootmem() gets upset. */
+ lmb_analyze();
+ memsize = lmb_phys_mem_size();
+ if ((memsize & PAGE_MASK) != memsize)
+ limit = memsize & PAGE_MASK;
+ }
+ lmb_enforce_memory_limit(limit);
+
lmb_analyze();
DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
@@ -1271,6 +1285,37 @@ struct device_node *of_find_node_by_phandle(phandle handle)
EXPORT_SYMBOL(of_find_node_by_phandle);
/**
+ * of_find_next_cache_node - Find a node's subsidiary cache
+ * @np: node of type "cpu" or "cache"
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done. Caller should hold a reference
+ * to np.
+ */
+struct device_node *of_find_next_cache_node(struct device_node *np)
+{
+ struct device_node *child;
+ const phandle *handle;
+
+ handle = of_get_property(np, "l2-cache", NULL);
+ if (!handle)
+ handle = of_get_property(np, "next-level-cache", NULL);
+
+ if (handle)
+ return of_find_node_by_phandle(*handle);
+
+ /* OF on pmac has "l2-cache" nodes beneath CPU nodes instead of
+ * "l2-cache" properties.
+ */
+ if (!strcmp(np->type, "cpu"))
+ for_each_child_of_node(np, child)
+ if (!strcmp(child->type, "cache"))
+ return child;
+
+ return NULL;
+}
+
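A short sketch of how a caller might walk the cache hierarchy with this helper; the starting CPU node and the pr_debug() are illustrative only:

static void example_walk_cache_levels(struct device_node *cpu_node)
{
        struct device_node *cache = of_find_next_cache_node(cpu_node);

        while (cache) {
                struct device_node *next = of_find_next_cache_node(cache);

                pr_debug("cache node: %s\n", cache->full_name);
                of_node_put(cache);     /* drop the reference we hold */
                cache = next;
        }
}
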
+/**
* of_find_all_nodes - Get next node in global list
* @prev: Previous node or NULL to start iteration
* of_node_put() will be called on it
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 2445945d3761..7f1b33d5e30d 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1210,7 +1210,7 @@ static void __init prom_initialize_tce_table(void)
/* Initialize the table to have a one-to-one mapping
* over the allocated size.
*/
- tce_entryp = (unsigned long *)base;
+ tce_entryp = (u64 *)base;
for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
tce_entry = (i << PAGE_SHIFT);
tce_entry |= 0x3;
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index a11d68976dc8..8f0856f312da 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -232,11 +232,6 @@ int of_pci_address_to_resource(struct device_node *dev, int bar,
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
-static u8 of_irq_pci_swizzle(u8 slot, u8 pin)
-{
- return (((pin - 1) + slot) % 4) + 1;
-}
-
int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
{
struct device_node *dn, *ppnode;
@@ -306,7 +301,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
/* We can only get here if we hit a P2P bridge with no node,
* let's do standard swizzling and try again
*/
- lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec);
+ lspec = pci_swizzle_interrupt_pin(pdev, lspec);
pdev = ppdev;
}
@@ -734,10 +729,7 @@ void of_irq_map_init(unsigned int flags)
if (flags & OF_IMAP_NO_PHANDLE) {
struct device_node *np;
- for(np = NULL; (np = of_find_all_nodes(np)) != NULL;) {
- if (of_get_property(np, "interrupt-controller", NULL)
- == NULL)
- continue;
+ for_each_node_with_property(np, "interrupt-controller") {
/* Skip /chosen/interrupt-controller */
if (strcmp(np->name, "chosen") == 0)
continue;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 1f8505c23548..fdfe14c4bdef 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -566,6 +566,32 @@ int rtas_get_sensor(int sensor, int index, int *state)
}
EXPORT_SYMBOL(rtas_get_sensor);
+bool rtas_indicator_present(int token, int *maxindex)
+{
+ int proplen, count, i;
+ const struct indicator_elem {
+ u32 token;
+ u32 maxindex;
+ } *indicators;
+
+ indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
+ if (!indicators)
+ return false;
+
+ count = proplen / sizeof(struct indicator_elem);
+
+ for (i = 0; i < count; i++) {
+ if (indicators[i].token != token)
+ continue;
+ if (maxindex)
+ *maxindex = indicators[i].maxindex;
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(rtas_indicator_present);
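A minimal sketch of the intended use; token, index and value are placeholders, the point being to probe "rtas-indicators" before calling set-indicator:

static void example_set_indicator(int token, int index, int value)
{
        int maxindex;

        if (!rtas_indicator_present(token, &maxindex))
                return;         /* firmware doesn't list this token */
        if (index > maxindex)
                return;         /* index outside the advertised range */
        rtas_set_indicator(token, index, value);
}
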
+
int rtas_set_indicator(int indicator, int index, int new_value)
{
int token = rtas_token("set-indicator");
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 589a2797eac2..8869001ab5d7 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -301,51 +301,3 @@ void __init find_and_init_phbs(void)
#endif /* CONFIG_PPC32 */
}
}
-
-/* RPA-specific bits for removing PHBs */
-int pcibios_remove_root_bus(struct pci_controller *phb)
-{
- struct pci_bus *b = phb->bus;
- struct resource *res;
- int rc, i;
-
- res = b->resource[0];
- if (!res->flags) {
- printk(KERN_ERR "%s: no IO resource for PHB %s\n", __func__,
- b->name);
- return 1;
- }
-
- rc = pcibios_unmap_io_space(b);
- if (rc) {
- printk(KERN_ERR "%s: failed to unmap IO on bus %s\n",
- __func__, b->name);
- return 1;
- }
-
- if (release_resource(res)) {
- printk(KERN_ERR "%s: failed to release IO on bus %s\n",
- __func__, b->name);
- return 1;
- }
-
- for (i = 1; i < 3; ++i) {
- res = b->resource[i];
- if (!res->flags && i == 0) {
- printk(KERN_ERR "%s: no MEM resource for PHB %s\n",
- __func__, b->name);
- return 1;
- }
- if (res->flags && release_resource(res)) {
- printk(KERN_ERR
- "%s: failed to release IO %d on bus %s\n",
- __func__, i, b->name);
- return 1;
- }
- }
-
- pcibios_free_controller(phb);
-
- return 0;
-}
-EXPORT_SYMBOL(pcibios_remove_root_bus);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index c1a27626a940..9e1ca745d8f0 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -38,6 +38,7 @@
#include <asm/time.h>
#include <asm/serial.h>
#include <asm/udbg.h>
+#include <asm/mmu_context.h>
#include "setup.h"
@@ -49,12 +50,12 @@ int boot_cpuid;
EXPORT_SYMBOL_GPL(boot_cpuid);
int boot_cpuid_phys;
+int smp_hw_index[NR_CPUS];
+
unsigned long ISA_DMA_THRESHOLD;
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;
-int have_of = 1;
-
#ifdef CONFIG_VGA_CONSOLE
unsigned long vgacon_remap_base;
EXPORT_SYMBOL(vgacon_remap_base);
@@ -97,6 +98,10 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
PTRRELOC(&__start___ftr_fixup),
PTRRELOC(&__stop___ftr_fixup));
+ do_feature_fixups(spec->mmu_features,
+ PTRRELOC(&__start___mmu_ftr_fixup),
+ PTRRELOC(&__stop___mmu_ftr_fixup));
+
do_lwsync_fixups(spec->cpu_features,
PTRRELOC(&__start___lwsync_fixup),
PTRRELOC(&__stop___lwsync_fixup));
@@ -121,6 +126,8 @@ notrace void __init machine_init(unsigned long dt_ptr)
probe_machine();
+ setup_kdump_trampoline();
+
#ifdef CONFIG_6xx
if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
cpu_has_feature(CPU_FTR_CAN_NAP))
@@ -326,4 +333,8 @@ void __init setup_arch(char **cmdline_p)
if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
paging_init();
+
+ /* Initialize the MMU context management stuff */
+ mmu_context_init();
+
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 169d74cef157..d8bd2161e738 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -70,7 +70,6 @@
#define DBG(fmt...)
#endif
-int have_of = 1;
int boot_cpuid = 0;
u64 ppc64_pft_size;
@@ -362,6 +361,8 @@ void __init setup_system(void)
*/
do_feature_fixups(cur_cpu_spec->cpu_features,
&__start___ftr_fixup, &__stop___ftr_fixup);
+ do_feature_fixups(cur_cpu_spec->mmu_features,
+ &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup);
do_feature_fixups(powerpc_firmware_features,
&__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
do_lwsync_fixups(cur_cpu_spec->cpu_features,
@@ -606,8 +607,6 @@ void __init setup_per_cpu_areas(void)
for_each_possible_cpu(i) {
ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size);
- if (!ptr)
- panic("Cannot allocate cpu data for CPU %d\n", i);
paca[i].data_offset = ptr - __per_cpu_start;
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index bc892e69b4f7..a5e54526403d 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -113,7 +113,7 @@ void __devinit smp_generic_give_timebase(void)
{
int i, score, score2, old, min=0, max=5000, offset=1000;
- printk("Synchronizing timebase\n");
+ pr_debug("Software timebase sync\n");
/* if this fails then this kernel won't work anyway... */
tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL );
@@ -123,13 +123,13 @@ void __devinit smp_generic_give_timebase(void)
while (!tbsync->ack)
barrier();
- printk("Got ack\n");
+ pr_debug("Got ack\n");
/* binary search */
for (old = -1; old != offset ; offset = (min+max) / 2) {
score = start_contest(kSetAndTest, offset, NUM_ITER);
- printk("score %d, offset %d\n", score, offset );
+ pr_debug("score %d, offset %d\n", score, offset );
if( score > 0 )
max = offset;
@@ -140,8 +140,8 @@ void __devinit smp_generic_give_timebase(void)
score = start_contest(kSetAndTest, min, NUM_ITER);
score2 = start_contest(kSetAndTest, max, NUM_ITER);
- printk("Min %d (score %d), Max %d (score %d)\n",
- min, score, max, score2);
+ pr_debug("Min %d (score %d), Max %d (score %d)\n",
+ min, score, max, score2);
score = abs(score);
score2 = abs(score2);
offset = (score < score2) ? min : max;
@@ -155,7 +155,7 @@ void __devinit smp_generic_give_timebase(void)
if (score2 <= score || score2 < 20)
break;
}
- printk("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER );
+ pr_debug("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER );
/* exiting */
tbsync->cmd = kExit;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ff9f7010097d..65484b2200b3 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,16 +57,11 @@
#define DBG(fmt...)
#endif
-int smp_hw_index[NR_CPUS];
struct thread_info *secondary_ti;
-cpumask_t cpu_possible_map = CPU_MASK_NONE;
-cpumask_t cpu_online_map = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_possible_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
@@ -123,6 +118,65 @@ void smp_message_recv(int msg)
}
}
+static irqreturn_t call_function_action(int irq, void *data)
+{
+ generic_smp_call_function_interrupt();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t reschedule_action(int irq, void *data)
+{
+ /* we just need the return path side effect of checking need_resched */
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t call_function_single_action(int irq, void *data)
+{
+ generic_smp_call_function_single_interrupt();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t debug_ipi_action(int irq, void *data)
+{
+ smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
+ return IRQ_HANDLED;
+}
+
+static irq_handler_t smp_ipi_action[] = {
+ [PPC_MSG_CALL_FUNCTION] = call_function_action,
+ [PPC_MSG_RESCHEDULE] = reschedule_action,
+ [PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
+ [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
+};
+
+const char *smp_ipi_name[] = {
+ [PPC_MSG_CALL_FUNCTION] = "ipi call function",
+ [PPC_MSG_RESCHEDULE] = "ipi reschedule",
+ [PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
+ [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
+};
+
+/* Optional function to request an IPI, for controllers with >= 4 IPIs */
+int smp_request_message_ipi(int virq, int msg)
+{
+ int err;
+
+ if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
+ return -EINVAL;
+ }
+#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
+ if (msg == PPC_MSG_DEBUGGER_BREAK) {
+ return 1;
+ }
+#endif
+ err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
+ smp_ipi_name[msg], 0);
+ WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
+ virq, smp_ipi_name[msg], err);
+
+ return err;
+}
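A sketch of how a platform with four or more hardware IPIs could use this; ipi_virq_for() is a stand-in for however the interrupt controller maps a message to a virq:

static void example_request_message_ipis(void)
{
        int msg;

        for (msg = PPC_MSG_CALL_FUNCTION; msg <= PPC_MSG_DEBUGGER_BREAK; msg++)
                smp_request_message_ipi(ipi_virq_for(msg), msg);
}
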
+
void smp_send_reschedule(int cpu)
{
if (likely(smp_ops))
@@ -408,8 +462,7 @@ out:
static struct device_node *cpu_to_l2cache(int cpu)
{
struct device_node *np;
- const phandle *php;
- phandle ph;
+ struct device_node *cache;
if (!cpu_present(cpu))
return NULL;
@@ -418,13 +471,11 @@ static struct device_node *cpu_to_l2cache(int cpu)
if (np == NULL)
return NULL;
- php = of_get_property(np, "l2-cache", NULL);
- if (php == NULL)
- return NULL;
- ph = *php;
+ cache = of_find_next_cache_node(np);
+
of_node_put(np);
- return of_find_node_by_phandle(ph);
+ return cache;
}
/* Activate a secondary processor. */
diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c
index 77b7b34b5955..560c96119501 100644
--- a/arch/powerpc/kernel/swsusp.c
+++ b/arch/powerpc/kernel/swsusp.c
@@ -34,6 +34,6 @@ void save_processor_state(void)
void restore_processor_state(void)
{
#ifdef CONFIG_PPC32
- set_context(current->active_mm->context.id, current->active_mm->pgd);
+ switch_mmu_context(NULL, current->active_mm);
#endif
}
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index 77fc76607ab2..b47d8ceffb52 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -5,7 +5,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
-
+#include <asm/mmu.h>
/*
* Structure for storing CPU registers on the save area.
@@ -279,7 +279,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
mtibatl 3,r4
#endif
-BEGIN_FTR_SECTION
+BEGIN_MMU_FTR_SECTION
li r4,0
mtspr SPRN_DBAT4U,r4
mtspr SPRN_DBAT4L,r4
@@ -297,7 +297,7 @@ BEGIN_FTR_SECTION
mtspr SPRN_IBAT6L,r4
mtspr SPRN_IBAT7U,r4
mtspr SPRN_IBAT7L,r4
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
/* Flush all TLBs */
lis r4,0x1000
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 20885a38237a..4a2ee08af6a7 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -18,6 +18,8 @@
#include <asm/machdep.h>
#include <asm/smp.h>
+#include "cacheinfo.h"
+
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
@@ -25,8 +27,6 @@
static DEFINE_PER_CPU(struct cpu, cpu_devices);
-static DEFINE_PER_CPU(struct kobject *, cache_toplevel);
-
/*
* SMT snooze delay stuff, 64-bit only for now
*/
@@ -343,288 +343,6 @@ static struct sysdev_attribute pa6t_attrs[] = {
#endif /* HAS_PPC_PMC_PA6T */
#endif /* HAS_PPC_PMC_CLASSIC */
-struct cache_desc {
- struct kobject kobj;
- struct cache_desc *next;
- const char *type; /* Instruction, Data, or Unified */
- u32 size; /* total cache size in KB */
- u32 line_size; /* in bytes */
- u32 nr_sets; /* number of sets */
- u32 level; /* e.g. 1, 2, 3... */
- u32 associativity; /* e.g. 8-way... 0 is fully associative */
-};
-
-DEFINE_PER_CPU(struct cache_desc *, cache_desc);
-
-static struct cache_desc *kobj_to_cache_desc(struct kobject *k)
-{
- return container_of(k, struct cache_desc, kobj);
-}
-
-static void cache_desc_release(struct kobject *k)
-{
- struct cache_desc *desc = kobj_to_cache_desc(k);
-
- pr_debug("%s: releasing %s\n", __func__, kobject_name(k));
-
- if (desc->next)
- kobject_put(&desc->next->kobj);
-
- kfree(kobj_to_cache_desc(k));
-}
-
-static ssize_t cache_desc_show(struct kobject *k, struct attribute *attr, char *buf)
-{
- struct kobj_attribute *kobj_attr;
-
- kobj_attr = container_of(attr, struct kobj_attribute, attr);
-
- return kobj_attr->show(k, kobj_attr, buf);
-}
-
-static struct sysfs_ops cache_desc_sysfs_ops = {
- .show = cache_desc_show,
-};
-
-static struct kobj_type cache_desc_type = {
- .release = cache_desc_release,
- .sysfs_ops = &cache_desc_sysfs_ops,
-};
-
-static ssize_t cache_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
- struct cache_desc *cache = kobj_to_cache_desc(k);
-
- return sprintf(buf, "%uK\n", cache->size);
-}
-
-static struct kobj_attribute cache_size_attr =
- __ATTR(size, 0444, cache_size_show, NULL);
-
-static ssize_t cache_line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
- struct cache_desc *cache = kobj_to_cache_desc(k);
-
- return sprintf(buf, "%u\n", cache->line_size);
-}
-
-static struct kobj_attribute cache_line_size_attr =
- __ATTR(coherency_line_size, 0444, cache_line_size_show, NULL);
-
-static ssize_t cache_nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
- struct cache_desc *cache = kobj_to_cache_desc(k);
-
- return sprintf(buf, "%u\n", cache->nr_sets);
-}
-
-static struct kobj_attribute cache_nr_sets_attr =
- __ATTR(number_of_sets, 0444, cache_nr_sets_show, NULL);
-
-static ssize_t cache_type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
- struct cache_desc *cache = kobj_to_cache_desc(k);
-
- return sprintf(buf, "%s\n", cache->type);
-}
-
-static struct kobj_attribute cache_type_attr =
- __ATTR(type, 0444, cache_type_show, NULL);
-
-static ssize_t cache_level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
- struct cache_desc *cache = kobj_to_cache_desc(k);
-
- return sprintf(buf, "%u\n", cache->level);
-}
-
-static struct kobj_attribute cache_level_attr =
- __ATTR(level, 0444, cache_level_show, NULL);
-
-static ssize_t cache_assoc_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
-{
- struct cache_desc *cache = kobj_to_cache_desc(k);
-
- return sprintf(buf, "%u\n", cache->associativity);
-}
-
-static struct kobj_attribute cache_assoc_attr =
- __ATTR(ways_of_associativity, 0444, cache_assoc_show, NULL);
-
-struct cache_desc_info {
- const char *type;
- const char *size_prop;
- const char *line_size_prop;
- const char *nr_sets_prop;
-};
-
-/* PowerPC Processor binding says the [di]-cache-* must be equal on
- * unified caches, so just use d-cache properties. */
-static struct cache_desc_info ucache_info = {
- .type = "Unified",
- .size_prop = "d-cache-size",
- .line_size_prop = "d-cache-line-size",
- .nr_sets_prop = "d-cache-sets",
-};
-
-static struct cache_desc_info dcache_info = {
- .type = "Data",
- .size_prop = "d-cache-size",
- .line_size_prop = "d-cache-line-size",
- .nr_sets_prop = "d-cache-sets",
-};
-
-static struct cache_desc_info icache_info = {
- .type = "Instruction",
- .size_prop = "i-cache-size",
- .line_size_prop = "i-cache-line-size",
- .nr_sets_prop = "i-cache-sets",
-};
-
-static struct cache_desc * __cpuinit create_cache_desc(struct device_node *np, struct kobject *parent, int index, int level, struct cache_desc_info *info)
-{
- const u32 *cache_line_size;
- struct cache_desc *new;
- const u32 *cache_size;
- const u32 *nr_sets;
- int rc;
-
- new = kzalloc(sizeof(*new), GFP_KERNEL);
- if (!new)
- return NULL;
-
- rc = kobject_init_and_add(&new->kobj, &cache_desc_type, parent,
- "index%d", index);
- if (rc)
- goto err;
-
- /* type */
- new->type = info->type;
- rc = sysfs_create_file(&new->kobj, &cache_type_attr.attr);
- WARN_ON(rc);
-
- /* level */
- new->level = level;
- rc = sysfs_create_file(&new->kobj, &cache_level_attr.attr);
- WARN_ON(rc);
-
- /* size */
- cache_size = of_get_property(np, info->size_prop, NULL);
- if (cache_size) {
- new->size = *cache_size / 1024;
- rc = sysfs_create_file(&new->kobj,
- &cache_size_attr.attr);
- WARN_ON(rc);
- }
-
- /* coherency_line_size */
- cache_line_size = of_get_property(np, info->line_size_prop, NULL);
- if (cache_line_size) {
- new->line_size = *cache_line_size;
- rc = sysfs_create_file(&new->kobj,
- &cache_line_size_attr.attr);
- WARN_ON(rc);
- }
-
- /* number_of_sets */
- nr_sets = of_get_property(np, info->nr_sets_prop, NULL);
- if (nr_sets) {
- new->nr_sets = *nr_sets;
- rc = sysfs_create_file(&new->kobj,
- &cache_nr_sets_attr.attr);
- WARN_ON(rc);
- }
-
- /* ways_of_associativity */
- if (new->nr_sets == 1) {
- /* fully associative */
- new->associativity = 0;
- goto create_assoc;
- }
-
- if (new->nr_sets && new->size && new->line_size) {
- /* If we have values for all of these we can derive
- * the associativity. */
- new->associativity =
- ((new->size * 1024) / new->nr_sets) / new->line_size;
-create_assoc:
- rc = sysfs_create_file(&new->kobj,
- &cache_assoc_attr.attr);
- WARN_ON(rc);
- }
-
- return new;
-err:
- kfree(new);
- return NULL;
-}
-
-static bool cache_is_unified(struct device_node *np)
-{
- return of_get_property(np, "cache-unified", NULL);
-}
-
-static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level)
-{
- const phandle *next_cache_phandle;
- struct device_node *next_cache;
- struct cache_desc *new, **end;
-
- pr_debug("%s(node = %s, index = %d)\n", __func__, np->full_name, index);
-
- if (cache_is_unified(np)) {
- new = create_cache_desc(np, parent, index, level,
- &ucache_info);
- } else {
- new = create_cache_desc(np, parent, index, level,
- &dcache_info);
- if (new) {
- index++;
- new->next = create_cache_desc(np, parent, index, level,
- &icache_info);
- }
- }
- if (!new)
- return NULL;
-
- end = &new->next;
- while (*end)
- end = &(*end)->next;
-
- next_cache_phandle = of_get_property(np, "l2-cache", NULL);
- if (!next_cache_phandle)
- goto out;
-
- next_cache = of_find_node_by_phandle(*next_cache_phandle);
- if (!next_cache)
- goto out;
-
- *end = create_cache_index_info(next_cache, parent, ++index, ++level);
-
- of_node_put(next_cache);
-out:
- return new;
-}
-
-static void __cpuinit create_cache_info(struct sys_device *sysdev)
-{
- struct kobject *cache_toplevel;
- struct device_node *np = NULL;
- int cpu = sysdev->id;
-
- cache_toplevel = kobject_create_and_add("cache", &sysdev->kobj);
- if (!cache_toplevel)
- return;
- per_cpu(cache_toplevel, cpu) = cache_toplevel;
- np = of_get_cpu_node(cpu, NULL);
- if (np != NULL) {
- per_cpu(cache_desc, cpu) =
- create_cache_index_info(np, cache_toplevel, 0, 1);
- of_node_put(np);
- }
- return;
-}
-
static void __cpuinit register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -689,25 +407,10 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
sysdev_create_file(s, &attr_dscr);
#endif /* CONFIG_PPC64 */
- create_cache_info(s);
+ cacheinfo_cpu_online(cpu);
}
#ifdef CONFIG_HOTPLUG_CPU
-static void remove_cache_info(struct sys_device *sysdev)
-{
- struct kobject *cache_toplevel;
- struct cache_desc *cache_desc;
- int cpu = sysdev->id;
-
- cache_desc = per_cpu(cache_desc, cpu);
- if (cache_desc != NULL)
- kobject_put(&cache_desc->kobj);
-
- cache_toplevel = per_cpu(cache_toplevel, cpu);
- if (cache_toplevel != NULL)
- kobject_put(cache_toplevel);
-}
-
static void unregister_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -774,7 +477,7 @@ static void unregister_cpu_online(unsigned int cpu)
sysdev_remove_file(s, &attr_dscr);
#endif /* CONFIG_PPC64 */
- remove_cache_info(s);
+ cacheinfo_cpu_offline(cpu);
}
#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index e2ee66b5831d..c9564031a2a9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -164,8 +164,6 @@ static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static unsigned long boot_tb __read_mostly;
-static struct gettimeofday_struct do_gtod;
-
extern struct timezone sys_tz;
static long timezone_offset;
@@ -258,8 +256,10 @@ void account_system_vtime(struct task_struct *tsk)
delta += sys_time;
get_paca()->system_time = 0;
}
- account_system_time(tsk, 0, delta);
- account_system_time_scaled(tsk, deltascaled);
+ if (in_irq() || idle_task(smp_processor_id()) != tsk)
+ account_system_time(tsk, 0, delta, deltascaled);
+ else
+ account_idle_time(delta);
per_cpu(cputime_last_delta, smp_processor_id()) = delta;
per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled;
local_irq_restore(flags);
@@ -277,10 +277,8 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
utime = get_paca()->user_time;
get_paca()->user_time = 0;
- account_user_time(tsk, utime);
-
utimescaled = cputime_to_scaled(utime);
- account_user_time_scaled(tsk, utimescaled);
+ account_user_time(tsk, utime, utimescaled);
}
/*
@@ -340,8 +338,12 @@ void calculate_steal_time(void)
tb = mftb();
purr = mfspr(SPRN_PURR);
stolen = (tb - pme->tb) - (purr - pme->purr);
- if (stolen > 0)
- account_steal_time(current, stolen);
+ if (stolen > 0) {
+ if (idle_task(smp_processor_id()) != current)
+ account_steal_time(stolen);
+ else
+ account_idle_time(stolen);
+ }
pme->tb = tb;
pme->purr = purr;
}
@@ -415,31 +417,9 @@ void udelay(unsigned long usecs)
}
EXPORT_SYMBOL(udelay);
-
-/*
- * There are two copies of tb_to_xs and stamp_xsec so that no
- * lock is needed to access and use these values in
- * do_gettimeofday. We alternate the copies and as long as a
- * reasonable time elapses between changes, there will never
- * be inconsistent values. ntpd has a minimum of one minute
- * between updates.
- */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
u64 new_tb_to_xs)
{
- unsigned temp_idx;
- struct gettimeofday_vars *temp_varp;
-
- temp_idx = (do_gtod.var_idx == 0);
- temp_varp = &do_gtod.vars[temp_idx];
-
- temp_varp->tb_to_xs = new_tb_to_xs;
- temp_varp->tb_orig_stamp = new_tb_stamp;
- temp_varp->stamp_xsec = new_stamp_xsec;
- smp_mb();
- do_gtod.varp = temp_varp;
- do_gtod.var_idx = temp_idx;
-
/*
* tb_update_count is used to allow the userspace gettimeofday code
* to assure itself that it sees a consistent view of the tb_to_xs and
@@ -456,6 +436,7 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
vdso_data->tb_to_xs = new_tb_to_xs;
vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
+ vdso_data->stamp_xtime = xtime;
smp_wmb();
++(vdso_data->tb_update_count);
}
@@ -514,9 +495,7 @@ static int __init iSeries_tb_recal(void)
tb_ticks_per_sec = new_tb_ticks_per_sec;
calc_cputime_factors();
div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
- do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
tb_to_xs = divres.result_low;
- do_gtod.varp->tb_to_xs = tb_to_xs;
vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
vdso_data->tb_to_xs = tb_to_xs;
}
@@ -869,7 +848,7 @@ static void register_decrementer_clockevent(int cpu)
struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
*dec = decrementer_clockevent;
- dec->cpumask = cpumask_of_cpu(cpu);
+ dec->cpumask = cpumask_of(cpu);
printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n",
dec->name, dec->mult, dec->shift, cpu);
@@ -988,15 +967,6 @@ void __init time_init(void)
sys_tz.tz_dsttime = 0;
}
- do_gtod.varp = &do_gtod.vars[0];
- do_gtod.var_idx = 0;
- do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
- __get_cpu_var(last_jiffy) = tb_last_jiffy;
- do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
- do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
- do_gtod.varp->tb_to_xs = tb_to_xs;
- do_gtod.tb_to_us = tb_to_us;
-
vdso_data->tb_orig_stamp = tb_last_jiffy;
vdso_data->tb_update_count = 0;
vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index f5def6cf5cd6..5457e9575685 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1160,37 +1160,85 @@ void CacheLockingException(struct pt_regs *regs, unsigned long address,
#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
+ extern int do_spe_mathemu(struct pt_regs *regs);
unsigned long spefscr;
int fpexc_mode;
int code = 0;
+ int err;
+
+ preempt_disable();
+ if (regs->msr & MSR_SPE)
+ giveup_spe(current);
+ preempt_enable();
spefscr = current->thread.spefscr;
fpexc_mode = current->thread.fpexc_mode;
- /* Hardware does not neccessarily set sticky
- * underflow/overflow/invalid flags */
if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
code = FPE_FLTOVF;
- spefscr |= SPEFSCR_FOVFS;
}
else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
code = FPE_FLTUND;
- spefscr |= SPEFSCR_FUNFS;
}
else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
code = FPE_FLTDIV;
else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
code = FPE_FLTINV;
- spefscr |= SPEFSCR_FINVS;
}
else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
code = FPE_FLTRES;
- current->thread.spefscr = spefscr;
+ err = do_spe_mathemu(regs);
+ if (err == 0) {
+ regs->nip += 4; /* skip emulated instruction */
+ emulate_single_step(regs);
+ return;
+ }
+
+ if (err == -EFAULT) {
+ /* got an error reading the instruction */
+ _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
+ } else if (err == -EINVAL) {
+ /* didn't recognize the instruction */
+ printk(KERN_ERR "unrecognized spe instruction "
+ "in %s at %lx\n", current->comm, regs->nip);
+ } else {
+ _exception(SIGFPE, regs, code, regs->nip);
+ }
- _exception(SIGFPE, regs, code, regs->nip);
return;
}
+
+void SPEFloatingPointRoundException(struct pt_regs *regs)
+{
+ extern int speround_handler(struct pt_regs *regs);
+ int err;
+
+ preempt_disable();
+ if (regs->msr & MSR_SPE)
+ giveup_spe(current);
+ preempt_enable();
+
+ regs->nip -= 4;
+ err = speround_handler(regs);
+ if (err == 0) {
+ regs->nip += 4; /* skip emulated instruction */
+ emulate_single_step(regs);
+ return;
+ }
+
+ if (err == -EFAULT) {
+ /* got an error reading the instruction */
+ _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
+ } else if (err == -EINVAL) {
+ /* didn't recognize the instruction */
+ printk(KERN_ERR "unrecognized spe instruction "
+ "in %s at %lx\n", current->comm, regs->nip);
+ } else {
+ _exception(SIGFPE, regs, 0, regs->nip);
+ return;
+ }
+}
#endif
/*
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 65639a43e644..ad06d5c75b15 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -184,8 +184,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma)
* This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree
*/
-int arch_setup_additional_pages(struct linux_binprm *bprm,
- int executable_stack)
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
struct page **vdso_pagelist;
@@ -567,6 +566,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
do_feature_fixups(cur_cpu_spec->cpu_features,
start64, start64 + size64);
+ start64 = find_section64(v64->hdr, "__mmu_ftr_fixup", &size64);
+ if (start64)
+ do_feature_fixups(cur_cpu_spec->mmu_features,
+ start64, start64 + size64);
+
start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64);
if (start64)
do_feature_fixups(powerpc_firmware_features,
@@ -583,6 +587,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
do_feature_fixups(cur_cpu_spec->cpu_features,
start32, start32 + size32);
+ start32 = find_section32(v32->hdr, "__mmu_ftr_fixup", &size32);
+ if (start32)
+ do_feature_fixups(cur_cpu_spec->mmu_features,
+ start32, start32 + size32);
+
#ifdef CONFIG_PPC64
start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32);
if (start32)
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 72ca26df457e..ee038d4bf252 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -16,6 +16,13 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
+/* Offset for the low 32-bit part of a field of long type */
+#ifdef CONFIG_PPC64
+#define LOPART 4
+#else
+#define LOPART 0
+#endif
+
.text
/*
* Exact prototype of gettimeofday
@@ -90,101 +97,53 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
mflr r12 /* r12 saves lr */
.cfi_register lr,r12
- mr r10,r3 /* r10 saves id */
mr r11,r4 /* r11 saves tp */
bl __get_datapage@local /* get data page */
mr r9,r3 /* datapage ptr in r9 */
- beq cr1,50f /* if monotonic -> jump there */
-
- /*
- * CLOCK_REALTIME
- */
-
- bl __do_get_xsec@local /* get xsec from tb & kernel */
- bne- 98f /* out of line -> do syscall */
-
- /* seconds are xsec >> 20 */
- rlwinm r5,r4,12,20,31
- rlwimi r5,r3,12,0,19
- stw r5,TSPC32_TV_SEC(r11)
- /* get remaining xsec and convert to nsec. we scale
- * up remaining xsec by 12 bits and get the top 32 bits
- * of the multiplication, then we multiply by 1000
- */
- rlwinm r5,r4,12,0,19
- lis r6,1000000@h
- ori r6,r6,1000000@l
- mulhwu r5,r5,r6
- mulli r5,r5,1000
- stw r5,TSPC32_TV_NSEC(r11)
- mtlr r12
- crclr cr0*4+so
- li r3,0
- blr
+50: bl __do_get_tspec@local /* get sec/nsec from tb & kernel */
+ bne cr1,80f /* not monotonic -> all done */
/*
* CLOCK_MONOTONIC
*/
-50: bl __do_get_xsec@local /* get xsec from tb & kernel */
- bne- 98f /* out of line -> do syscall */
-
- /* seconds are xsec >> 20 */
- rlwinm r6,r4,12,20,31
- rlwimi r6,r3,12,0,19
-
- /* get remaining xsec and convert to nsec. we scale
- * up remaining xsec by 12 bits and get the top 32 bits
- * of the multiplication, then we multiply by 1000
- */
- rlwinm r7,r4,12,0,19
- lis r5,1000000@h
- ori r5,r5,1000000@l
- mulhwu r7,r7,r5
- mulli r7,r7,1000
-
/* now we must fixup using wall to monotonic. We need to snapshot
* that value and do the counter trick again. Fortunately, we still
* have the counter value in r8 that was returned by __do_get_xsec.
- * At this point, r6,r7 contain our sec/nsec values, r3,r4 and r5
- * can be used
+ * At this point, r3,r4 contain our sec/nsec values, r5 and r6
+ * can be used, r7 contains NSEC_PER_SEC.
*/
- lwz r3,WTOM_CLOCK_SEC(r9)
- lwz r4,WTOM_CLOCK_NSEC(r9)
+ lwz r5,WTOM_CLOCK_SEC(r9)
+ lwz r6,WTOM_CLOCK_NSEC(r9)
- /* We now have our result in r3,r4. We create a fake dependency
- * on that result and re-check the counter
+ /* We now have our offset in r5,r6. We create a fake dependency
+ * on that value and re-check the counter
*/
- or r5,r4,r3
- xor r0,r5,r5
+ or r0,r6,r5
+ xor r0,r0,r0
add r9,r9,r0
-#ifdef CONFIG_PPC64
- lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9)
-#else
- lwz r0,(CFG_TB_UPDATE_COUNT)(r9)
-#endif
+ lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
cmpl cr0,r8,r0 /* check if updated */
bne- 50b
- /* Calculate and store result. Note that this mimmics the C code,
+ /* Calculate and store result. Note that this mimics the C code,
* which may cause funny results if nsec goes negative... is that
* possible at all ?
*/
- add r3,r3,r6
- add r4,r4,r7
- lis r5,NSEC_PER_SEC@h
- ori r5,r5,NSEC_PER_SEC@l
- cmpl cr0,r4,r5
- cmpli cr1,r4,0
+ add r3,r3,r5
+ add r4,r4,r6
+ cmpw cr0,r4,r7
+ cmpwi cr1,r4,0
blt 1f
- subf r4,r5,r4
+ subf r4,r7,r4
addi r3,r3,1
-1: bge cr1,1f
+1: bge cr1,80f
addi r3,r3,-1
- add r4,r4,r5
-1: stw r3,TSPC32_TV_SEC(r11)
+ add r4,r4,r7
+
+80: stw r3,TSPC32_TV_SEC(r11)
stw r4,TSPC32_TV_NSEC(r11)
mtlr r12
@@ -195,10 +154,6 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
/*
* syscall fallback
*/
-98:
- mtlr r12
- mr r3,r10
- mr r4,r11
99:
li r0,__NR_clock_gettime
sc
@@ -254,11 +209,7 @@ __do_get_xsec:
/* Check for update count & load values. We use the low
* order 32 bits of the update count
*/
-#ifdef CONFIG_PPC64
-1: lwz r8,(CFG_TB_UPDATE_COUNT+4)(r9)
-#else
-1: lwz r8,(CFG_TB_UPDATE_COUNT)(r9)
-#endif
+1: lwz r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
andi. r0,r8,1 /* pending update ? loop */
bne- 1b
xor r0,r8,r8 /* create dependency */
@@ -305,11 +256,7 @@ __do_get_xsec:
or r6,r4,r3
xor r0,r6,r6
add r9,r9,r0
-#ifdef CONFIG_PPC64
- lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9)
-#else
- lwz r0,(CFG_TB_UPDATE_COUNT)(r9)
-#endif
+ lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
cmpl cr0,r8,r0 /* check if updated */
bne- 1b
@@ -322,3 +269,98 @@ __do_get_xsec:
*/
3: blr
.cfi_endproc
+
+/*
+ * This is the core of clock_gettime(), it returns the current
+ * time in seconds and nanoseconds in r3 and r4.
+ * It expects the datapage ptr in r9 and doesn't clobber it.
+ * It clobbers r0, r5, r6, r10 and returns NSEC_PER_SEC in r7.
+ * On return, r8 contains the counter value that can be reused.
+ * This clobbers cr0 but not any other cr field.
+ */
+__do_get_tspec:
+ .cfi_startproc
+ /* Check for update count & load values. We use the low
+ * order 32 bits of the update count
+ */
+1: lwz r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
+ andi. r0,r8,1 /* pending update ? loop */
+ bne- 1b
+ xor r0,r8,r8 /* create dependency */
+ add r9,r9,r0
+
+ /* Load orig stamp (offset to TB) */
+ lwz r5,CFG_TB_ORIG_STAMP(r9)
+ lwz r6,(CFG_TB_ORIG_STAMP+4)(r9)
+
+ /* Get a stable TB value */
+2: mftbu r3
+ mftbl r4
+ mftbu r0
+ cmpl cr0,r3,r0
+ bne- 2b
+
+ /* Subtract tb orig stamp and shift left 12 bits.
+ */
+ subfc r7,r6,r4
+ subfe r0,r5,r3
+ slwi r0,r0,12
+ rlwimi. r0,r7,12,20,31
+ slwi r7,r7,12
+
+ /* Load scale factor & do multiplication */
+ lwz r5,CFG_TB_TO_XS(r9) /* load values */
+ lwz r6,(CFG_TB_TO_XS+4)(r9)
+ mulhwu r3,r7,r6
+ mullw r10,r7,r5
+ mulhwu r4,r7,r5
+ addc r10,r3,r10
+ li r3,0
+
+ beq+ 4f /* skip high part computation if 0 */
+ mulhwu r3,r0,r5
+ mullw r7,r0,r5
+ mulhwu r5,r0,r6
+ mullw r6,r0,r6
+ adde r4,r4,r7
+ addze r3,r3
+ addc r4,r4,r5
+ addze r3,r3
+ addc r10,r10,r6
+
+4: addze r4,r4 /* add in carry */
+ lis r7,NSEC_PER_SEC@h
+ ori r7,r7,NSEC_PER_SEC@l
+ mulhwu r4,r4,r7 /* convert to nanoseconds */
+
+ /* At this point, we have seconds & nanoseconds since the xtime
+ * stamp in r3+CA and r4. Load & add the xtime stamp.
+ */
+#ifdef CONFIG_PPC64
+ lwz r5,STAMP_XTIME+TSPC64_TV_SEC+LOPART(r9)
+ lwz r6,STAMP_XTIME+TSPC64_TV_NSEC+LOPART(r9)
+#else
+ lwz r5,STAMP_XTIME+TSPC32_TV_SEC(r9)
+ lwz r6,STAMP_XTIME+TSPC32_TV_NSEC(r9)
+#endif
+ add r4,r4,r6
+ adde r3,r3,r5
+
+ /* We now have our result in r3,r4. We create a fake dependency
+ * on that result and re-check the counter
+ */
+ or r6,r4,r3
+ xor r0,r6,r6
+ add r9,r9,r0
+ lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
+ cmpl cr0,r8,r0 /* check if updated */
+ bne- 1b
+
+ /* check for nanosecond overflow and adjust if necessary */
+ cmpw r4,r7
+ bltlr /* all done if no overflow */
+ subf r4,r7,r4 /* adjust if overflow */
+ addi r3,r3,1
+
+ blr
+ .cfi_endproc
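
For reference, a rough C model of what the new __do_get_tspec routine computes (an illustrative sketch only: the names are assumptions, and a compiler-provided 128-bit type stands in for the mulhwu/mulhdu sequences above):

	#include <stdint.h>

	struct tspec { uint64_t sec; uint32_t nsec; };

	/* tb_to_xs is scaled so that ((tb_delta << 12) * tb_to_xs) >> 64 yields
	 * seconds since the xtime stamp in 32.32 fixed point (i.e. tb_to_xs is
	 * roughly 2^84 / timebase frequency). */
	static struct tspec do_get_tspec_model(uint64_t tb, uint64_t tb_orig_stamp,
					       uint64_t tb_to_xs,
					       uint64_t stamp_sec, uint32_t stamp_nsec)
	{
		unsigned __int128 prod =
			(unsigned __int128)((tb - tb_orig_stamp) << 12) * tb_to_xs;
		uint64_t t = (uint64_t)(prod >> 64);	/* 32.32 seconds since stamp */
		struct tspec ts;

		ts.sec  = stamp_sec + (t >> 32);
		ts.nsec = stamp_nsec +
			  (uint32_t)(((t & 0xffffffffull) * 1000000000ull) >> 32);
		if (ts.nsec >= 1000000000u) {		/* carry into seconds */
			ts.nsec -= 1000000000u;
			ts.sec++;
		}
		return ts;
	}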
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index be3b6a41dc09..904ef1360dd7 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -34,6 +34,9 @@ SECTIONS
__ftr_fixup : { *(__ftr_fixup) }
. = ALIGN(8);
+ __mmu_ftr_fixup : { *(__mmu_ftr_fixup) }
+
+ . = ALIGN(8);
__lwsync_fixup : { *(__lwsync_fixup) }
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index c6401f9e37f1..262cd5857a56 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -75,90 +75,49 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
mflr r12 /* r12 saves lr */
.cfi_register lr,r12
- mr r10,r3 /* r10 saves id */
mr r11,r4 /* r11 saves tp */
bl V_LOCAL_FUNC(__get_datapage) /* get data page */
- beq cr1,50f /* if monotonic -> jump there */
-
- /*
- * CLOCK_REALTIME
- */
-
- bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */
-
- lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */
- ori r7,r7,16960
- rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */
- rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */
- std r5,TSPC64_TV_SEC(r11) /* store sec in tv */
- subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */
- mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) /
- * XSEC_PER_SEC
- */
- rldicl r0,r0,44,20
- mulli r0,r0,1000 /* nsec = usec * 1000 */
- std r0,TSPC64_TV_NSEC(r11) /* store nsec in tp */
-
- mtlr r12
- crclr cr0*4+so
- li r3,0
- blr
+50: bl V_LOCAL_FUNC(__do_get_tspec) /* get time from tb & kernel */
+ bne cr1,80f /* if not monotonic, all done */
/*
* CLOCK_MONOTONIC
*/
-50: bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */
-
- lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */
- ori r7,r7,16960
- rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */
- rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */
- subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */
- mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) /
- * XSEC_PER_SEC
- */
- rldicl r6,r0,44,20
- mulli r6,r6,1000 /* nsec = usec * 1000 */
-
/* now we must fixup using wall to monotonic. We need to snapshot
* that value and do the counter trick again. Fortunately, we still
- * have the counter value in r8 that was returned by __do_get_xsec.
- * At this point, r5,r6 contain our sec/nsec values.
- * can be used
+ * have the counter value in r8 that was returned by __do_get_tspec.
+ * At this point, r4,r5 contain our sec/nsec values.
*/
- lwa r4,WTOM_CLOCK_SEC(r3)
- lwa r7,WTOM_CLOCK_NSEC(r3)
+ lwa r6,WTOM_CLOCK_SEC(r3)
+ lwa r9,WTOM_CLOCK_NSEC(r3)
- /* We now have our result in r4,r7. We create a fake dependency
+ /* We now have our result in r6,r9. We create a fake dependency
* on that result and re-check the counter
*/
- or r9,r4,r7
- xor r0,r9,r9
+ or r0,r6,r9
+ xor r0,r0,r0
add r3,r3,r0
ld r0,CFG_TB_UPDATE_COUNT(r3)
cmpld cr0,r0,r8 /* check if updated */
bne- 50b
- /* Calculate and store result. Note that this mimmics the C code,
- * which may cause funny results if nsec goes negative... is that
- * possible at all ?
+ /* Add wall->monotonic offset and check for overflow or underflow.
*/
- add r4,r4,r5
- add r7,r7,r6
- lis r9,NSEC_PER_SEC@h
- ori r9,r9,NSEC_PER_SEC@l
- cmpl cr0,r7,r9
- cmpli cr1,r7,0
+ add r4,r4,r6
+ add r5,r5,r9
+ cmpd cr0,r5,r7
+ cmpdi cr1,r5,0
blt 1f
- subf r7,r9,r7
+ subf r5,r7,r5
addi r4,r4,1
-1: bge cr1,1f
+1: bge cr1,80f
addi r4,r4,-1
- add r7,r7,r9
-1: std r4,TSPC64_TV_SEC(r11)
- std r7,TSPC64_TV_NSEC(r11)
+ add r5,r5,r7
+
+80: std r4,TSPC64_TV_SEC(r11)
+ std r5,TSPC64_TV_NSEC(r11)
mtlr r12
crclr cr0*4+so
@@ -168,10 +127,6 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
/*
* syscall fallback
*/
-98:
- mtlr r12
- mr r3,r10
- mr r4,r11
99:
li r0,__NR_clock_gettime
sc
@@ -253,3 +208,59 @@ V_FUNCTION_BEGIN(__do_get_xsec)
blr
.cfi_endproc
V_FUNCTION_END(__do_get_xsec)
+
+/*
+ * This is the core of clock_gettime(), it returns the current
+ * time in seconds and nanoseconds in r4 and r5.
+ * It expects the datapage ptr in r3 and doesn't clobber it.
+ * It clobbers r0, r6 and r9 and returns NSEC_PER_SEC in r7.
+ * On return, r8 contains the counter value that can be reused.
+ * This clobbers cr0 but not any other cr field.
+ */
+V_FUNCTION_BEGIN(__do_get_tspec)
+ .cfi_startproc
+ /* check for update count & load values */
+1: ld r8,CFG_TB_UPDATE_COUNT(r3)
+ andi. r0,r8,1 /* pending update ? loop */
+ bne- 1b
+ xor r0,r8,r8 /* create dependency */
+ add r3,r3,r0
+
+ /* Get TB & offset it. We use the MFTB macro which will generate
+ * workaround code for Cell.
+ */
+ MFTB(r7)
+ ld r9,CFG_TB_ORIG_STAMP(r3)
+ subf r7,r9,r7
+
+ /* Scale result */
+ ld r5,CFG_TB_TO_XS(r3)
+ sldi r7,r7,12 /* compute time since stamp_xtime */
+ mulhdu r6,r7,r5 /* in units of 2^-32 seconds */
+
+ /* Add stamp since epoch */
+ ld r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
+ ld r5,STAMP_XTIME+TSPC64_TV_NSEC(r3)
+ or r0,r4,r5
+ or r0,r0,r6
+ xor r0,r0,r0
+ add r3,r3,r0
+ ld r0,CFG_TB_UPDATE_COUNT(r3)
+ cmpld r0,r8 /* check if updated */
+ bne- 1b /* reload if so */
+
+ /* convert to seconds & nanoseconds and add to stamp */
+ lis r7,NSEC_PER_SEC@h
+ ori r7,r7,NSEC_PER_SEC@l
+ mulhwu r0,r6,r7 /* compute nanoseconds and */
+ srdi r6,r6,32 /* seconds since stamp_xtime */
+ clrldi r0,r0,32
+ add r5,r5,r0 /* add nanoseconds together */
+ cmpd r5,r7 /* overflow? */
+ add r4,r4,r6
+ bltlr /* all done if no overflow */
+ subf r5,r7,r5 /* if overflow, adjust */
+ addi r4,r4,1
+ blr
+ .cfi_endproc
+V_FUNCTION_END(__do_get_tspec)
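
The CLOCK_MONOTONIC paths above add the wall-to-monotonic offset after calling __do_get_tspec and then renormalize the nanosecond field. A minimal C sketch of that fixup (names assumed; wtom_nsec may be negative, which is why both directions are handled, mirroring the cmp/blt/bge sequences):

	static void wtom_fixup(int64_t *sec, int64_t *nsec,
			       int64_t wtom_sec, int64_t wtom_nsec)
	{
		*sec  += wtom_sec;
		*nsec += wtom_nsec;
		if (*nsec >= 1000000000) {		/* overflow: carry into seconds */
			*nsec -= 1000000000;
			(*sec)++;
		} else if (*nsec < 0) {			/* underflow: borrow from seconds */
			*nsec += 1000000000;
			(*sec)--;
		}
	}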
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index d0b2526dd38d..0e615404e247 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -35,6 +35,9 @@ SECTIONS
__ftr_fixup : { *(__ftr_fixup) }
. = ALIGN(8);
+ __mmu_ftr_fixup : { *(__mmu_ftr_fixup) }
+
+ . = ALIGN(8);
__lwsync_fixup : { *(__lwsync_fixup) }
. = ALIGN(8);
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index a11e6bc59b30..94aa7b011b27 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -41,9 +41,9 @@
static struct bus_type vio_bus_type;
static struct vio_dev vio_bus_device = { /* fake "parent" device */
- .name = vio_bus_device.dev.bus_id,
+ .name = "vio",
.type = "",
- .dev.bus_id = "vio",
+ .dev.init_name = "vio",
.dev.bus = &vio_bus_type,
};
@@ -1216,7 +1216,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
viodev->irq = irq_of_parse_and_map(of_node, 0);
- snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
+ dev_set_name(&viodev->dev, "%x", *unit_address);
viodev->name = of_node->name;
viodev->type = of_node->type;
viodev->unit_address = *unit_address;
@@ -1243,7 +1243,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
/* register with generic device framework */
if (device_register(&viodev->dev)) {
printk(KERN_ERR "%s: failed to register device %s\n",
- __func__, viodev->dev.bus_id);
+ __func__, dev_name(&viodev->dev));
/* XXX free TCE table */
kfree(viodev);
return NULL;
@@ -1400,13 +1400,13 @@ static struct vio_dev *vio_find_name(const char *name)
struct vio_dev *vio_find_node(struct device_node *vnode)
{
const uint32_t *unit_address;
- char kobj_name[BUS_ID_SIZE];
+ char kobj_name[20];
/* construct the kobject name from the device node */
unit_address = of_get_property(vnode, "reg", NULL);
if (!unit_address)
return NULL;
- snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address);
+ snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address);
return vio_find_name(kobj_name);
}
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 2412c056baa4..47bf15cd2c9e 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -152,6 +152,12 @@ SECTIONS
__stop___ftr_fixup = .;
}
. = ALIGN(8);
+ __mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
+ __start___mmu_ftr_fixup = .;
+ *(__mmu_ftr_fixup)
+ __stop___mmu_ftr_fixup = .;
+ }
+ . = ALIGN(8);
__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
__start___lwsync_fixup = .;
*(__lwsync_fixup)
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
new file mode 100644
index 000000000000..a66bec57265a
--- /dev/null
+++ b/arch/powerpc/kvm/44x.c
@@ -0,0 +1,228 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+
+#include <asm/reg.h>
+#include <asm/cputable.h>
+#include <asm/tlbflush.h>
+#include <asm/kvm_44x.h>
+#include <asm/kvm_ppc.h>
+
+#include "44x_tlb.h"
+
+/* Note: clearing MSR[DE] just means that the debug interrupt will not be
+ * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
+ * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
+ * will be delivered as an "imprecise debug event" (which is indicated by
+ * DBSR[IDE]).
+ */
+static void kvm44x_disable_debug_interrupts(void)
+{
+ mtmsr(mfmsr() & ~MSR_DE);
+}
+
+void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
+{
+ kvm44x_disable_debug_interrupts();
+
+ mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
+ mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
+ mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
+ mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
+ mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
+ mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
+ mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
+ mtmsr(vcpu->arch.host_msr);
+}
+
+void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
+{
+ struct kvm_guest_debug *dbg = &vcpu->guest_debug;
+ u32 dbcr0 = 0;
+
+ vcpu->arch.host_msr = mfmsr();
+ kvm44x_disable_debug_interrupts();
+
+ /* Save host debug register state. */
+ vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
+ vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
+ vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
+ vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
+ vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
+ vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
+ vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
+
+ /* set registers up for guest */
+
+ if (dbg->bp[0]) {
+ mtspr(SPRN_IAC1, dbg->bp[0]);
+ dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
+ }
+ if (dbg->bp[1]) {
+ mtspr(SPRN_IAC2, dbg->bp[1]);
+ dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
+ }
+ if (dbg->bp[2]) {
+ mtspr(SPRN_IAC3, dbg->bp[2]);
+ dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
+ }
+ if (dbg->bp[3]) {
+ mtspr(SPRN_IAC4, dbg->bp[3]);
+ dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
+ }
+
+ mtspr(SPRN_DBCR0, dbcr0);
+ mtspr(SPRN_DBCR1, 0);
+ mtspr(SPRN_DBCR2, 0);
+}
+
+void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ kvmppc_44x_tlb_load(vcpu);
+}
+
+void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ kvmppc_44x_tlb_put(vcpu);
+}
+
+int kvmppc_core_check_processor_compat(void)
+{
+ int r;
+
+ if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
+ r = 0;
+ else
+ r = -ENOTSUPP;
+
+ return r;
+}
+
+int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+ struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[0];
+ int i;
+
+ tlbe->tid = 0;
+ tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
+ tlbe->word1 = 0;
+ tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;
+
+ tlbe++;
+ tlbe->tid = 0;
+ tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID;
+ tlbe->word1 = 0xef600000;
+ tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR
+ | PPC44x_TLB_I | PPC44x_TLB_G;
+
+ /* Since the guest can directly access the timebase, it must know the
+ * real timebase frequency. Accordingly, it must see the state of
+ * CCR1[TCS]. */
+ vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
+
+ for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++)
+ vcpu_44x->shadow_refs[i].gtlb_index = -1;
+
+ return 0;
+}
+
+/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
+int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
+{
+ struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+ struct kvmppc_44x_tlbe *gtlbe;
+ int index;
+ gva_t eaddr;
+ u8 pid;
+ u8 as;
+
+ eaddr = tr->linear_address;
+ pid = (tr->linear_address >> 32) & 0xff;
+ as = (tr->linear_address >> 40) & 0x1;
+
+ index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);
+ if (index == -1) {
+ tr->valid = 0;
+ return 0;
+ }
+
+ gtlbe = &vcpu_44x->guest_tlb[index];
+
+ tr->physical_address = tlb_xlate(gtlbe, eaddr);
+ /* XXX what do "writeable" and "usermode" even mean? */
+ tr->valid = 1;
+
+ return 0;
+}
+
+struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ struct kvmppc_vcpu_44x *vcpu_44x;
+ struct kvm_vcpu *vcpu;
+ int err;
+
+ vcpu_44x = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ if (!vcpu_44x) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ vcpu = &vcpu_44x->vcpu;
+ err = kvm_vcpu_init(vcpu, kvm, id);
+ if (err)
+ goto free_vcpu;
+
+ return vcpu;
+
+free_vcpu:
+ kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
+out:
+ return ERR_PTR(err);
+}
+
+void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+
+ kvm_vcpu_uninit(vcpu);
+ kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
+}
+
+static int kvmppc_44x_init(void)
+{
+ int r;
+
+ r = kvmppc_booke_init();
+ if (r)
+ return r;
+
+ return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE);
+}
+
+static void kvmppc_44x_exit(void)
+{
+ kvmppc_booke_exit();
+}
+
+module_init(kvmppc_44x_init);
+module_exit(kvmppc_44x_exit);
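
Throughout the new 44x code, the architecture-specific vcpu state wraps the generic struct kvm_vcpu, and to_44x() recovers the wrapper from the embedded member. A sketch of the pattern (the real definitions live in asm/kvm_44x.h; the struct layout shown here is illustrative, not the actual one):

	#include <linux/kvm_host.h>	/* struct kvm_vcpu; container_of() via kernel.h */

	struct kvmppc_vcpu_44x_sketch {
		/* ... 44x-private guest TLB, shadow TLB and shadow_refs state ... */
		struct kvm_vcpu vcpu;	/* generic vcpu embedded in the wrapper */
	};

	static inline struct kvmppc_vcpu_44x_sketch *to_44x_sketch(struct kvm_vcpu *vcpu)
	{
		return container_of(vcpu, struct kvmppc_vcpu_44x_sketch, vcpu);
	}

This is also why kvmppc_core_vcpu_create() above allocates the wrapper and returns &vcpu_44x->vcpu, while kvmppc_core_vcpu_free() frees the wrapper recovered with to_44x().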
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
new file mode 100644
index 000000000000..82489a743a6f
--- /dev/null
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -0,0 +1,371 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <asm/kvm_ppc.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/disassemble.h>
+#include <asm/kvm_44x.h>
+#include "timing.h"
+
+#include "booke.h"
+#include "44x_tlb.h"
+
+#define OP_RFI 19
+
+#define XOP_RFI 50
+#define XOP_MFMSR 83
+#define XOP_WRTEE 131
+#define XOP_MTMSR 146
+#define XOP_WRTEEI 163
+#define XOP_MFDCR 323
+#define XOP_MTDCR 451
+#define XOP_TLBSX 914
+#define XOP_ICCCI 966
+#define XOP_TLBWE 978
+
+static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.pc = vcpu->arch.srr0;
+ kvmppc_set_msr(vcpu, vcpu->arch.srr1);
+}
+
+int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance)
+{
+ int emulated = EMULATE_DONE;
+ int dcrn;
+ int ra;
+ int rb;
+ int rc;
+ int rs;
+ int rt;
+ int ws;
+
+ switch (get_op(inst)) {
+ case OP_RFI:
+ switch (get_xop(inst)) {
+ case XOP_RFI:
+ kvmppc_emul_rfi(vcpu);
+ kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS);
+ *advance = 0;
+ break;
+
+ default:
+ emulated = EMULATE_FAIL;
+ break;
+ }
+ break;
+
+ case 31:
+ switch (get_xop(inst)) {
+
+ case XOP_MFMSR:
+ rt = get_rt(inst);
+ vcpu->arch.gpr[rt] = vcpu->arch.msr;
+ kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
+ break;
+
+ case XOP_MTMSR:
+ rs = get_rs(inst);
+ kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
+ kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
+ break;
+
+ case XOP_WRTEE:
+ rs = get_rs(inst);
+ vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+ | (vcpu->arch.gpr[rs] & MSR_EE);
+ kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
+ break;
+
+ case XOP_WRTEEI:
+ vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+ | (inst & MSR_EE);
+ kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
+ break;
+
+ case XOP_MFDCR:
+ dcrn = get_dcrn(inst);
+ rt = get_rt(inst);
+
+ /* The guest may access CPR0 registers to determine the timebase
+ * frequency, and it must know the real host frequency because it
+ * can directly access the timebase registers.
+ *
+ * It would be possible to emulate those accesses in userspace,
+ * but userspace can really only figure out the end frequency.
+ * We could decompose that into the factors that compute it, but
+ * that's tricky math, and it's easier to just report the real
+ * CPR0 values.
+ */
+ switch (dcrn) {
+ case DCRN_CPR0_CONFIG_ADDR:
+ vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
+ break;
+ case DCRN_CPR0_CONFIG_DATA:
+ local_irq_disable();
+ mtdcr(DCRN_CPR0_CONFIG_ADDR,
+ vcpu->arch.cpr0_cfgaddr);
+ vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
+ local_irq_enable();
+ break;
+ default:
+ run->dcr.dcrn = dcrn;
+ run->dcr.data = 0;
+ run->dcr.is_write = 0;
+ vcpu->arch.io_gpr = rt;
+ vcpu->arch.dcr_needed = 1;
+ kvmppc_account_exit(vcpu, DCR_EXITS);
+ emulated = EMULATE_DO_DCR;
+ }
+
+ break;
+
+ case XOP_MTDCR:
+ dcrn = get_dcrn(inst);
+ rs = get_rs(inst);
+
+ /* emulate some access in kernel */
+ switch (dcrn) {
+ case DCRN_CPR0_CONFIG_ADDR:
+ vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
+ break;
+ default:
+ run->dcr.dcrn = dcrn;
+ run->dcr.data = vcpu->arch.gpr[rs];
+ run->dcr.is_write = 1;
+ vcpu->arch.dcr_needed = 1;
+ kvmppc_account_exit(vcpu, DCR_EXITS);
+ emulated = EMULATE_DO_DCR;
+ }
+
+ break;
+
+ case XOP_TLBWE:
+ ra = get_ra(inst);
+ rs = get_rs(inst);
+ ws = get_ws(inst);
+ emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
+ break;
+
+ case XOP_TLBSX:
+ rt = get_rt(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+ rc = get_rc(inst);
+ emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
+ break;
+
+ case XOP_ICCCI:
+ break;
+
+ default:
+ emulated = EMULATE_FAIL;
+ }
+
+ break;
+
+ default:
+ emulated = EMULATE_FAIL;
+ }
+
+ return emulated;
+}
+
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+{
+ switch (sprn) {
+ case SPRN_MMUCR:
+ vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
+ case SPRN_PID:
+ kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
+ case SPRN_CCR0:
+ vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
+ case SPRN_CCR1:
+ vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
+ case SPRN_DEAR:
+ vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
+ case SPRN_ESR:
+ vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
+ case SPRN_DBCR0:
+ vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
+ case SPRN_DBCR1:
+ vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
+ case SPRN_TSR:
+ vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
+ case SPRN_TCR:
+ vcpu->arch.tcr = vcpu->arch.gpr[rs];
+ kvmppc_emulate_dec(vcpu);
+ break;
+
+ /* Note: SPRG4-7 are user-readable. These values are
+ * loaded into the real SPRGs when resuming the
+ * guest. */
+ case SPRN_SPRG4:
+ vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG5:
+ vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG6:
+ vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG7:
+ vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
+
+ case SPRN_IVPR:
+ vcpu->arch.ivpr = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IVOR0:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IVOR1:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IVOR2:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IVOR3:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IVOR4:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IVOR5:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IVOR6:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IVOR7:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IVOR8:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IVOR9:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IVOR10:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IVOR11:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IVOR12:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IVOR13:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IVOR14:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs];
+ break;
+ case SPRN_IVOR15:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs];
+ break;
+
+ default:
+ return EMULATE_FAIL;
+ }
+
+ kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
+ return EMULATE_DONE;
+}
+
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+{
+ switch (sprn) {
+ /* 440 */
+ case SPRN_MMUCR:
+ vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
+ case SPRN_CCR0:
+ vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
+ case SPRN_CCR1:
+ vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
+
+ /* Book E */
+ case SPRN_PID:
+ vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
+ case SPRN_IVPR:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
+ case SPRN_DEAR:
+ vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
+ case SPRN_ESR:
+ vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
+ case SPRN_DBCR0:
+ vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
+ case SPRN_DBCR1:
+ vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
+
+ case SPRN_IVOR0:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
+ break;
+ case SPRN_IVOR1:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
+ break;
+ case SPRN_IVOR2:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
+ break;
+ case SPRN_IVOR3:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
+ break;
+ case SPRN_IVOR4:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
+ break;
+ case SPRN_IVOR5:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
+ break;
+ case SPRN_IVOR6:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
+ break;
+ case SPRN_IVOR7:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
+ break;
+ case SPRN_IVOR8:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
+ break;
+ case SPRN_IVOR9:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
+ break;
+ case SPRN_IVOR10:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
+ break;
+ case SPRN_IVOR11:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
+ break;
+ case SPRN_IVOR12:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
+ break;
+ case SPRN_IVOR13:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
+ break;
+ case SPRN_IVOR14:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
+ break;
+ case SPRN_IVOR15:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
+ break;
+
+ default:
+ return EMULATE_FAIL;
+ }
+
+ kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
+ return EMULATE_DONE;
+}
+
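The get_op()/get_xop()/get_rt()-style helpers used in kvmppc_core_emulate_op() come from asm/disassemble.h and extract the standard PowerPC X-form instruction fields. A sketch of the extraction involved (bit positions follow the architecture; the helper names here are illustrative, not the header's):

	static inline unsigned int op_field(unsigned int inst)  { return inst >> 26; }		/* primary opcode */
	static inline unsigned int xop_field(unsigned int inst) { return (inst >> 1) & 0x3ff; }	/* extended opcode */
	static inline unsigned int rt_field(unsigned int inst)  { return (inst >> 21) & 0x1f; }	/* RT (same bits as RS) */
	static inline unsigned int ra_field(unsigned int inst)  { return (inst >> 16) & 0x1f; }
	static inline unsigned int rb_field(unsigned int inst)  { return (inst >> 11) & 0x1f; }
	static inline unsigned int rc_field(unsigned int inst)  { return inst & 1; }		/* record bit */
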
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index ad72c6f9811f..9a34b8edb9e2 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -22,20 +22,103 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
+
+#include <asm/tlbflush.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>
+#include <asm/kvm_44x.h>
+#include "timing.h"
#include "44x_tlb.h"
+#ifndef PPC44x_TLBE_SIZE
+#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
+#endif
+
+#define PAGE_SIZE_4K (1<<12)
+#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))
+
+#define PPC44x_TLB_UATTR_MASK \
+ (PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
-static unsigned int kvmppc_tlb_44x_pos;
+#ifdef DEBUG
+void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+ struct kvmppc_44x_tlbe *tlbe;
+ int i;
+
+ printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
+ printk("| %2s | %3s | %8s | %8s | %8s |\n",
+ "nr", "tid", "word0", "word1", "word2");
+
+ for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
+ tlbe = &vcpu_44x->guest_tlb[i];
+ if (tlbe->word0 & PPC44x_TLB_VALID)
+ printk(" G%2d | %02X | %08X | %08X | %08X |\n",
+ i, tlbe->tid, tlbe->word0, tlbe->word1,
+ tlbe->word2);
+ }
+}
+#endif
+
+static inline void kvmppc_44x_tlbie(unsigned int index)
+{
+ /* 0 <= index < 64, so the V bit is clear and we can use the index as
+ * word0. */
+ asm volatile(
+ "tlbwe %[index], %[index], 0\n"
+ :
+ : [index] "r"(index)
+ );
+}
+
+static inline void kvmppc_44x_tlbre(unsigned int index,
+ struct kvmppc_44x_tlbe *tlbe)
+{
+ asm volatile(
+ "tlbre %[word0], %[index], 0\n"
+ "mfspr %[tid], %[sprn_mmucr]\n"
+ "andi. %[tid], %[tid], 0xff\n"
+ "tlbre %[word1], %[index], 1\n"
+ "tlbre %[word2], %[index], 2\n"
+ : [word0] "=r"(tlbe->word0),
+ [word1] "=r"(tlbe->word1),
+ [word2] "=r"(tlbe->word2),
+ [tid] "=r"(tlbe->tid)
+ : [index] "r"(index),
+ [sprn_mmucr] "i"(SPRN_MMUCR)
+ : "cc"
+ );
+}
+
+static inline void kvmppc_44x_tlbwe(unsigned int index,
+ struct kvmppc_44x_tlbe *stlbe)
+{
+ unsigned long tmp;
+
+ asm volatile(
+ "mfspr %[tmp], %[sprn_mmucr]\n"
+ "rlwimi %[tmp], %[tid], 0, 0xff\n"
+ "mtspr %[sprn_mmucr], %[tmp]\n"
+ "tlbwe %[word0], %[index], 0\n"
+ "tlbwe %[word1], %[index], 1\n"
+ "tlbwe %[word2], %[index], 2\n"
+ : [tmp] "=&r"(tmp)
+ : [word0] "r"(stlbe->word0),
+ [word1] "r"(stlbe->word1),
+ [word2] "r"(stlbe->word2),
+ [tid] "r"(stlbe->tid),
+ [index] "r"(index),
+ [sprn_mmucr] "i"(SPRN_MMUCR)
+ );
+}
static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
- /* Mask off reserved bits. */
- attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK;
+ /* We only care about the guest's permission and user bits. */
+ attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;
if (!usermode) {
/* Guest is in supervisor mode, so we need to translate guest
@@ -47,18 +130,60 @@ static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
/* Make sure host can always access this memory. */
attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;
+ /* WIMGE = 0b00100 */
+ attrib |= PPC44x_TLB_M;
+
return attrib;
}
+/* Load shadow TLB back into hardware. */
+void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+ int i;
+
+ for (i = 0; i <= tlb_44x_hwater; i++) {
+ struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
+
+ if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
+ kvmppc_44x_tlbwe(i, stlbe);
+ }
+}
+
+static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
+ unsigned int i)
+{
+ vcpu_44x->shadow_tlb_mod[i] = 1;
+}
+
+/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
+void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+ int i;
+
+ for (i = 0; i <= tlb_44x_hwater; i++) {
+ struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
+
+ if (vcpu_44x->shadow_tlb_mod[i])
+ kvmppc_44x_tlbre(i, stlbe);
+
+ if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
+ kvmppc_44x_tlbie(i);
+ }
+}
+
+
/* Search the guest TLB for a matching entry. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
unsigned int as)
{
+ struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
int i;
/* XXX Replace loop with fancy data structures. */
- for (i = 0; i < PPC44x_TLB_SIZE; i++) {
- struct tlbe *tlbe = &vcpu->arch.guest_tlb[i];
+ for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
+ struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
unsigned int tid;
if (eaddr < get_tlb_eaddr(tlbe))
@@ -83,78 +208,89 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
return -1;
}
-struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
+int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
unsigned int as = !!(vcpu->arch.msr & MSR_IS);
- unsigned int index;
- index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
- if (index == -1)
- return NULL;
- return &vcpu->arch.guest_tlb[index];
+ return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}
-struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
+int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
unsigned int as = !!(vcpu->arch.msr & MSR_DS);
- unsigned int index;
- index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
- if (index == -1)
- return NULL;
- return &vcpu->arch.guest_tlb[index];
+ return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}
-static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
+static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
+ unsigned int stlb_index)
{
- return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
-}
+ struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];
-static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
- unsigned int index)
-{
- struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
- struct page *page = vcpu->arch.shadow_pages[index];
+ if (!ref->page)
+ return;
- if (get_tlb_v(stlbe)) {
- if (kvmppc_44x_tlbe_is_writable(stlbe))
- kvm_release_page_dirty(page);
- else
- kvm_release_page_clean(page);
- }
+ /* Discard from the TLB. */
+ /* Note: we could actually invalidate a host mapping, if the host overwrote
+ * this TLB entry since we inserted a guest mapping. */
+ kvmppc_44x_tlbie(stlb_index);
+
+ /* Now release the page. */
+ if (ref->writeable)
+ kvm_release_page_dirty(ref->page);
+ else
+ kvm_release_page_clean(ref->page);
+
+ ref->page = NULL;
+
+ /* XXX set tlb_44x_index to stlb_index? */
+
+ KVMTRACE_1D(STLB_INVAL, &vcpu_44x->vcpu, stlb_index, handler);
}
void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
{
+ struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
int i;
for (i = 0; i <= tlb_44x_hwater; i++)
- kvmppc_44x_shadow_release(vcpu, i);
-}
-
-void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
-{
- vcpu->arch.shadow_tlb_mod[i] = 1;
+ kvmppc_44x_shadow_release(vcpu_44x, i);
}
-/* Caller must ensure that the specified guest TLB entry is safe to insert into
- * the shadow TLB. */
-void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
- u32 flags)
+/**
+ * kvmppc_mmu_map -- create a host mapping for guest memory
+ *
+ * If the guest wanted a larger page than the host supports, only the first
+ * host page is mapped here and the rest are demand faulted.
+ *
+ * If the guest wanted a smaller page than the host page size, we map only the
+ * guest-size page (i.e. not a full host page mapping).
+ *
+ * Caller must ensure that the specified guest TLB entry is safe to insert into
+ * the shadow TLB.
+ */
+void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
+ u32 flags, u32 max_bytes, unsigned int gtlb_index)
{
+ struct kvmppc_44x_tlbe stlbe;
+ struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+ struct kvmppc_44x_shadow_ref *ref;
struct page *new_page;
- struct tlbe *stlbe;
hpa_t hpaddr;
+ gfn_t gfn;
unsigned int victim;
- /* Future optimization: don't overwrite the TLB entry containing the
- * current PC (or stack?). */
- victim = kvmppc_tlb_44x_pos++;
- if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
- kvmppc_tlb_44x_pos = 0;
- stlbe = &vcpu->arch.shadow_tlb[victim];
+ /* Select TLB entry to clobber. Indirectly guard against races with the TLB
+ * miss handler by disabling interrupts. */
+ local_irq_disable();
+ victim = ++tlb_44x_index;
+ if (victim > tlb_44x_hwater)
+ victim = 0;
+ tlb_44x_index = victim;
+ local_irq_enable();
/* Get reference to new page. */
+ gfn = gpaddr >> PAGE_SHIFT;
new_page = gfn_to_page(vcpu->kvm, gfn);
if (is_error_page(new_page)) {
printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
@@ -163,10 +299,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
}
hpaddr = page_to_phys(new_page);
- /* Drop reference to old page. */
- kvmppc_44x_shadow_release(vcpu, victim);
-
- vcpu->arch.shadow_pages[victim] = new_page;
+ /* Invalidate any previous shadow mappings. */
+ kvmppc_44x_shadow_release(vcpu_44x, victim);
/* XXX Make sure (va, size) doesn't overlap any other
* entries. 440x6 user manual says the result would be
@@ -174,78 +308,193 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
/* XXX what about AS? */
- stlbe->tid = !(asid & 0xff);
-
/* Force TS=1 for all guest mappings. */
- /* For now we hardcode 4KB mappings, but it will be important to
- * use host large pages in the future. */
- stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
- | PPC44x_TLB_4K;
- stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
- stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
- vcpu->arch.msr & MSR_PR);
- kvmppc_tlbe_set_modified(vcpu, victim);
+ stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;
+
+ if (max_bytes >= PAGE_SIZE) {
+ /* Guest mapping is larger than or equal to host page size. We can use
+ * a "native" host mapping. */
+ stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
+ } else {
+ /* Guest mapping is smaller than host page size. We must restrict the
+ * size of the mapping to be at most the smaller of the two, but for
+ * simplicity we fall back to a 4K mapping (this is probably what the
+ * guest is using anyways). */
+ stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;
+
+ /* 'hpaddr' is a host page, which is larger than the mapping we're
+ * inserting here. To compensate, we must add the in-page offset to the
+ * sub-page. */
+ hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
+ }
- KVMTRACE_5D(STLB_WRITE, vcpu, victim,
- stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2,
- handler);
+ stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
+ stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
+ vcpu->arch.msr & MSR_PR);
+ stlbe.tid = !(asid & 0xff);
+
+ /* Keep track of the reference so we can properly release it later. */
+ ref = &vcpu_44x->shadow_refs[victim];
+ ref->page = new_page;
+ ref->gtlb_index = gtlb_index;
+ ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
+ ref->tid = stlbe.tid;
+
+ /* Insert shadow mapping into hardware TLB. */
+ kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
+ kvmppc_44x_tlbwe(victim, &stlbe);
+ KVMTRACE_5D(STLB_WRITE, vcpu, victim, stlbe.tid, stlbe.word0, stlbe.word1,
+ stlbe.word2, handler);
}
-void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
- gva_t eend, u32 asid)
+/* For a particular guest TLB entry, invalidate the corresponding host TLB
+ * mappings and release the host pages. */
+static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
+ unsigned int gtlb_index)
{
- unsigned int pid = !(asid & 0xff);
+ struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
int i;
- /* XXX Replace loop with fancy data structures. */
- for (i = 0; i <= tlb_44x_hwater; i++) {
- struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
- unsigned int tid;
+ for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
+ struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
+ if (ref->gtlb_index == gtlb_index)
+ kvmppc_44x_shadow_release(vcpu_44x, i);
+ }
+}
- if (!get_tlb_v(stlbe))
- continue;
+void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
+{
+ vcpu->arch.shadow_pid = !usermode;
+}
- if (eend < get_tlb_eaddr(stlbe))
- continue;
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
+{
+ struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+ int i;
- if (eaddr > get_tlb_end(stlbe))
- continue;
+ if (unlikely(vcpu->arch.pid == new_pid))
+ return;
- tid = get_tlb_tid(stlbe);
- if (tid && (tid != pid))
- continue;
+ vcpu->arch.pid = new_pid;
- kvmppc_44x_shadow_release(vcpu, i);
- stlbe->word0 = 0;
- kvmppc_tlbe_set_modified(vcpu, i);
- KVMTRACE_5D(STLB_INVAL, vcpu, i,
- stlbe->tid, stlbe->word0, stlbe->word1,
- stlbe->word2, handler);
+ /* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
+ * can't access guest kernel mappings (TID=1). When we switch to a new
+ * guest PID, which will also use host PID=0, we must discard the old guest
+ * userspace mappings. */
+ for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
+ struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
+
+ if (ref->tid == 0)
+ kvmppc_44x_shadow_release(vcpu_44x, i);
}
}
-/* Invalidate all mappings on the privilege switch after PID has been changed.
- * The guest always runs with PID=1, so we must clear the entire TLB when
- * switching address spaces. */
-void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
+static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
+ const struct kvmppc_44x_tlbe *tlbe)
{
- int i;
+ gpa_t gpa;
- if (vcpu->arch.swap_pid) {
- /* XXX Replace loop with fancy data structures. */
- for (i = 0; i <= tlb_44x_hwater; i++) {
- struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
-
- /* Future optimization: clear only userspace mappings. */
- kvmppc_44x_shadow_release(vcpu, i);
- stlbe->word0 = 0;
- kvmppc_tlbe_set_modified(vcpu, i);
- KVMTRACE_5D(STLB_INVAL, vcpu, i,
- stlbe->tid, stlbe->word0, stlbe->word1,
- stlbe->word2, handler);
- }
- vcpu->arch.swap_pid = 0;
+ if (!get_tlb_v(tlbe))
+ return 0;
+
+ /* Does it match current guest AS? */
+ /* XXX what about IS != DS? */
+ if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
+ return 0;
+
+ gpa = get_tlb_raddr(tlbe);
+ if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
+ /* Mapping is not for RAM. */
+ return 0;
+
+ return 1;
+}
+
+int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
+{
+ struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+ struct kvmppc_44x_tlbe *tlbe;
+ unsigned int gtlb_index;
+
+ gtlb_index = vcpu->arch.gpr[ra];
+ if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
+ printk("%s: index %d\n", __func__, gtlb_index);
+ kvmppc_dump_vcpu(vcpu);
+ return EMULATE_FAIL;
}
- vcpu->arch.shadow_pid = !usermode;
+ tlbe = &vcpu_44x->guest_tlb[gtlb_index];
+
+ /* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
+ if (tlbe->word0 & PPC44x_TLB_VALID)
+ kvmppc_44x_invalidate(vcpu, gtlb_index);
+
+ switch (ws) {
+ case PPC44x_TLB_PAGEID:
+ tlbe->tid = get_mmucr_stid(vcpu);
+ tlbe->word0 = vcpu->arch.gpr[rs];
+ break;
+
+ case PPC44x_TLB_XLAT:
+ tlbe->word1 = vcpu->arch.gpr[rs];
+ break;
+
+ case PPC44x_TLB_ATTRIB:
+ tlbe->word2 = vcpu->arch.gpr[rs];
+ break;
+
+ default:
+ return EMULATE_FAIL;
+ }
+
+ if (tlbe_is_host_safe(vcpu, tlbe)) {
+ u64 asid;
+ gva_t eaddr;
+ gpa_t gpaddr;
+ u32 flags;
+ u32 bytes;
+
+ eaddr = get_tlb_eaddr(tlbe);
+ gpaddr = get_tlb_raddr(tlbe);
+
+ /* Use the advertised page size to mask effective and real addrs. */
+ bytes = get_tlb_bytes(tlbe);
+ eaddr &= ~(bytes - 1);
+ gpaddr &= ~(bytes - 1);
+
+ asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
+ flags = tlbe->word2 & 0xffff;
+
+ kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes, gtlb_index);
+ }
+
+ KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0,
+ tlbe->word1, tlbe->word2, handler);
+
+ kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
+ return EMULATE_DONE;
+}
+
+int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
+{
+ u32 ea;
+ int gtlb_index;
+ unsigned int as = get_mmucr_sts(vcpu);
+ unsigned int pid = get_mmucr_stid(vcpu);
+
+ ea = vcpu->arch.gpr[rb];
+ if (ra)
+ ea += vcpu->arch.gpr[ra];
+
+ gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
+ if (rc) {
+ if (gtlb_index < 0)
+ vcpu->arch.cr &= ~0x20000000;
+ else
+ vcpu->arch.cr |= 0x20000000;
+ }
+ vcpu->arch.gpr[rt] = gtlb_index;
+
+ kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
+ return EMULATE_DONE;
}
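
In kvmppc_44x_emul_tlbsx() above, 0x20000000 is the EQ bit of condition-register field 0 (CR0 bits from the most-significant end: LT 0x80000000, GT 0x40000000, EQ 0x20000000, SO 0x10000000); tlbsx. reports a successful search through CR0[EQ]. A one-function sketch of the same update:

	static void set_cr0_eq(unsigned int *cr, int found)
	{
		if (found)
			*cr |= 0x20000000;	/* CR0[EQ] = 1: matching TLB entry found */
		else
			*cr &= ~0x20000000;	/* CR0[EQ] = 0: search missed */
	}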
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
index 2ccd46b6f6b7..772191f29e62 100644
--- a/arch/powerpc/kvm/44x_tlb.h
+++ b/arch/powerpc/kvm/44x_tlb.h
@@ -25,48 +25,52 @@
extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr,
unsigned int pid, unsigned int as);
-extern struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr);
-extern struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
+
+extern int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb,
+ u8 rc);
+extern int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws);
/* TLB helper functions */
-static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
+static inline unsigned int get_tlb_size(const struct kvmppc_44x_tlbe *tlbe)
{
return (tlbe->word0 >> 4) & 0xf;
}
-static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe)
+static inline gva_t get_tlb_eaddr(const struct kvmppc_44x_tlbe *tlbe)
{
return tlbe->word0 & 0xfffffc00;
}
-static inline gva_t get_tlb_bytes(const struct tlbe *tlbe)
+static inline gva_t get_tlb_bytes(const struct kvmppc_44x_tlbe *tlbe)
{
unsigned int pgsize = get_tlb_size(tlbe);
return 1 << 10 << (pgsize << 1);
}
-static inline gva_t get_tlb_end(const struct tlbe *tlbe)
+static inline gva_t get_tlb_end(const struct kvmppc_44x_tlbe *tlbe)
{
return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1;
}
-static inline u64 get_tlb_raddr(const struct tlbe *tlbe)
+static inline u64 get_tlb_raddr(const struct kvmppc_44x_tlbe *tlbe)
{
u64 word1 = tlbe->word1;
return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00);
}
-static inline unsigned int get_tlb_tid(const struct tlbe *tlbe)
+static inline unsigned int get_tlb_tid(const struct kvmppc_44x_tlbe *tlbe)
{
return tlbe->tid & 0xff;
}
-static inline unsigned int get_tlb_ts(const struct tlbe *tlbe)
+static inline unsigned int get_tlb_ts(const struct kvmppc_44x_tlbe *tlbe)
{
return (tlbe->word0 >> 8) & 0x1;
}
-static inline unsigned int get_tlb_v(const struct tlbe *tlbe)
+static inline unsigned int get_tlb_v(const struct kvmppc_44x_tlbe *tlbe)
{
return (tlbe->word0 >> 9) & 0x1;
}
@@ -81,7 +85,7 @@ static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu)
return (vcpu->arch.mmucr >> 16) & 0x1;
}
-static inline gpa_t tlb_xlate(struct tlbe *tlbe, gva_t eaddr)
+static inline gpa_t tlb_xlate(struct kvmppc_44x_tlbe *tlbe, gva_t eaddr)
{
unsigned int pgmask = get_tlb_bytes(tlbe) - 1;
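
For the size decoding above, get_tlb_bytes() computes 1 << 10 << (SIZE << 1), i.e. 4^SIZE KiB: SIZE 0 is 1 KiB, 1 is 4 KiB, 4 is 256 KiB and 7 is 16 MiB. tlb_xlate() then uses pgmask = get_tlb_bytes() - 1 to keep the offset within the page. A tiny standalone sketch of the arithmetic (name assumed):

	/* 4^SIZE KiB: SIZE 0 -> 1 KiB, 1 -> 4 KiB, 4 -> 256 KiB, 7 -> 16 MiB */
	static inline unsigned int tlb_bytes_sketch(unsigned int size_field)
	{
		return 1u << 10 << (size_field << 1);
	}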
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 53aaa66b25e5..6dbdc4817d80 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -15,27 +15,33 @@ menuconfig VIRTUALIZATION
if VIRTUALIZATION
config KVM
- bool "Kernel-based Virtual Machine (KVM) support"
- depends on 44x && EXPERIMENTAL
+ bool
select PREEMPT_NOTIFIERS
select ANON_INODES
- # We can only run on Book E hosts so far
- select KVM_BOOKE_HOST
+
+config KVM_440
+ bool "KVM support for PowerPC 440 processors"
+ depends on EXPERIMENTAL && 44x
+ select KVM
---help---
- Support hosting virtualized guest machines. You will also
- need to select one or more of the processor modules below.
+ Support running unmodified 440 guest kernels in virtual machines on
+ 440 host processors.
This module provides access to the hardware capabilities through
a character device node named /dev/kvm.
If unsure, say N.
-config KVM_BOOKE_HOST
- bool "KVM host support for Book E PowerPC processors"
- depends on KVM && 44x
+config KVM_EXIT_TIMING
+ bool "Detailed exit timing"
+ depends on KVM
---help---
- Provides host support for KVM on Book E PowerPC processors. Currently
- this works on 440 processors only.
+ Calculate elapsed time for every exit/enter cycle. A per-vcpu
+ report is available in debugfs kvm/vm#_vcpu#_timing.
+ The overhead is relatively small, however it is not recommended for
+ production environments.
+
+ If unsure, say N.
config KVM_TRACE
bool "KVM trace support"
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 2a5d4397ac4b..df7ba59e6d53 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -8,10 +8,16 @@ common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o)
-kvm-objs := $(common-objs-y) powerpc.o emulate.o booke_guest.o
+kvm-objs := $(common-objs-y) powerpc.o emulate.o
+obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
obj-$(CONFIG_KVM) += kvm.o
AFLAGS_booke_interrupts.o := -I$(obj)
-kvm-booke-host-objs := booke_host.o booke_interrupts.o 44x_tlb.o
-obj-$(CONFIG_KVM_BOOKE_HOST) += kvm-booke-host.o
+kvm-440-objs := \
+ booke.o \
+ booke_interrupts.o \
+ 44x.o \
+ 44x_tlb.o \
+ 44x_emulate.o
+obj-$(CONFIG_KVM_440) += kvm-440.o
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke.c
index 7b2591e26bae..35485dd6927e 100644
--- a/arch/powerpc/kvm/booke_guest.c
+++ b/arch/powerpc/kvm/booke.c
@@ -24,21 +24,26 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
+
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
+#include "timing.h"
+#include <asm/cacheflush.h>
+#include <asm/kvm_44x.h>
+#include "booke.h"
#include "44x_tlb.h"
+unsigned long kvmppc_booke_handlers;
+
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
- { "exits", VCPU_STAT(sum_exits) },
{ "mmio", VCPU_STAT(mmio_exits) },
{ "dcr", VCPU_STAT(dcr_exits) },
{ "sig", VCPU_STAT(signal_exits) },
- { "light", VCPU_STAT(light_exits) },
{ "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
{ "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
{ "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
@@ -53,103 +58,19 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
-static const u32 interrupt_msr_mask[16] = {
- [BOOKE_INTERRUPT_CRITICAL] = MSR_ME,
- [BOOKE_INTERRUPT_MACHINE_CHECK] = 0,
- [BOOKE_INTERRUPT_DATA_STORAGE] = MSR_CE|MSR_ME|MSR_DE,
- [BOOKE_INTERRUPT_INST_STORAGE] = MSR_CE|MSR_ME|MSR_DE,
- [BOOKE_INTERRUPT_EXTERNAL] = MSR_CE|MSR_ME|MSR_DE,
- [BOOKE_INTERRUPT_ALIGNMENT] = MSR_CE|MSR_ME|MSR_DE,
- [BOOKE_INTERRUPT_PROGRAM] = MSR_CE|MSR_ME|MSR_DE,
- [BOOKE_INTERRUPT_FP_UNAVAIL] = MSR_CE|MSR_ME|MSR_DE,
- [BOOKE_INTERRUPT_SYSCALL] = MSR_CE|MSR_ME|MSR_DE,
- [BOOKE_INTERRUPT_AP_UNAVAIL] = MSR_CE|MSR_ME|MSR_DE,
- [BOOKE_INTERRUPT_DECREMENTER] = MSR_CE|MSR_ME|MSR_DE,
- [BOOKE_INTERRUPT_FIT] = MSR_CE|MSR_ME|MSR_DE,
- [BOOKE_INTERRUPT_WATCHDOG] = MSR_ME,
- [BOOKE_INTERRUPT_DTLB_MISS] = MSR_CE|MSR_ME|MSR_DE,
- [BOOKE_INTERRUPT_ITLB_MISS] = MSR_CE|MSR_ME|MSR_DE,
- [BOOKE_INTERRUPT_DEBUG] = MSR_ME,
-};
-
-const unsigned char exception_priority[] = {
- [BOOKE_INTERRUPT_DATA_STORAGE] = 0,
- [BOOKE_INTERRUPT_INST_STORAGE] = 1,
- [BOOKE_INTERRUPT_ALIGNMENT] = 2,
- [BOOKE_INTERRUPT_PROGRAM] = 3,
- [BOOKE_INTERRUPT_FP_UNAVAIL] = 4,
- [BOOKE_INTERRUPT_SYSCALL] = 5,
- [BOOKE_INTERRUPT_AP_UNAVAIL] = 6,
- [BOOKE_INTERRUPT_DTLB_MISS] = 7,
- [BOOKE_INTERRUPT_ITLB_MISS] = 8,
- [BOOKE_INTERRUPT_MACHINE_CHECK] = 9,
- [BOOKE_INTERRUPT_DEBUG] = 10,
- [BOOKE_INTERRUPT_CRITICAL] = 11,
- [BOOKE_INTERRUPT_WATCHDOG] = 12,
- [BOOKE_INTERRUPT_EXTERNAL] = 13,
- [BOOKE_INTERRUPT_FIT] = 14,
- [BOOKE_INTERRUPT_DECREMENTER] = 15,
-};
-
-const unsigned char priority_exception[] = {
- BOOKE_INTERRUPT_DATA_STORAGE,
- BOOKE_INTERRUPT_INST_STORAGE,
- BOOKE_INTERRUPT_ALIGNMENT,
- BOOKE_INTERRUPT_PROGRAM,
- BOOKE_INTERRUPT_FP_UNAVAIL,
- BOOKE_INTERRUPT_SYSCALL,
- BOOKE_INTERRUPT_AP_UNAVAIL,
- BOOKE_INTERRUPT_DTLB_MISS,
- BOOKE_INTERRUPT_ITLB_MISS,
- BOOKE_INTERRUPT_MACHINE_CHECK,
- BOOKE_INTERRUPT_DEBUG,
- BOOKE_INTERRUPT_CRITICAL,
- BOOKE_INTERRUPT_WATCHDOG,
- BOOKE_INTERRUPT_EXTERNAL,
- BOOKE_INTERRUPT_FIT,
- BOOKE_INTERRUPT_DECREMENTER,
-};
-
-
-void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
-{
- struct tlbe *tlbe;
- int i;
-
- printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
- printk("| %2s | %3s | %8s | %8s | %8s |\n",
- "nr", "tid", "word0", "word1", "word2");
-
- for (i = 0; i < PPC44x_TLB_SIZE; i++) {
- tlbe = &vcpu->arch.guest_tlb[i];
- if (tlbe->word0 & PPC44x_TLB_VALID)
- printk(" G%2d | %02X | %08X | %08X | %08X |\n",
- i, tlbe->tid, tlbe->word0, tlbe->word1,
- tlbe->word2);
- }
-
- for (i = 0; i < PPC44x_TLB_SIZE; i++) {
- tlbe = &vcpu->arch.shadow_tlb[i];
- if (tlbe->word0 & PPC44x_TLB_VALID)
- printk(" S%2d | %02X | %08X | %08X | %08X |\n",
- i, tlbe->tid, tlbe->word0, tlbe->word1,
- tlbe->word2);
- }
-}
-
/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
int i;
- printk("pc: %08x msr: %08x\n", vcpu->arch.pc, vcpu->arch.msr);
- printk("lr: %08x ctr: %08x\n", vcpu->arch.lr, vcpu->arch.ctr);
- printk("srr0: %08x srr1: %08x\n", vcpu->arch.srr0, vcpu->arch.srr1);
+ printk("pc: %08lx msr: %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
+ printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
+ printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);
printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
for (i = 0; i < 32; i += 4) {
- printk("gpr%02d: %08x %08x %08x %08x\n", i,
+ printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
vcpu->arch.gpr[i],
vcpu->arch.gpr[i+1],
vcpu->arch.gpr[i+2],
@@ -157,69 +78,96 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
}
}
-/* Check if we are ready to deliver the interrupt */
-static int kvmppc_can_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
+static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
+ unsigned int priority)
{
- int r;
+ set_bit(priority, &vcpu->arch.pending_exceptions);
+}
- switch (interrupt) {
- case BOOKE_INTERRUPT_CRITICAL:
- r = vcpu->arch.msr & MSR_CE;
- break;
- case BOOKE_INTERRUPT_MACHINE_CHECK:
- r = vcpu->arch.msr & MSR_ME;
- break;
- case BOOKE_INTERRUPT_EXTERNAL:
- r = vcpu->arch.msr & MSR_EE;
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
+{
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
+}
+
+void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
+{
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
+}
+
+int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
+{
+ return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
+}
+
+void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq)
+{
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
+}
+
+/* Deliver the interrupt of the corresponding priority, if possible. */
+static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
+ unsigned int priority)
+{
+ int allowed = 0;
+ ulong msr_mask;
+
+ switch (priority) {
+ case BOOKE_IRQPRIO_PROGRAM:
+ case BOOKE_IRQPRIO_DTLB_MISS:
+ case BOOKE_IRQPRIO_ITLB_MISS:
+ case BOOKE_IRQPRIO_SYSCALL:
+ case BOOKE_IRQPRIO_DATA_STORAGE:
+ case BOOKE_IRQPRIO_INST_STORAGE:
+ case BOOKE_IRQPRIO_FP_UNAVAIL:
+ case BOOKE_IRQPRIO_AP_UNAVAIL:
+ case BOOKE_IRQPRIO_ALIGNMENT:
+ allowed = 1;
+ msr_mask = MSR_CE|MSR_ME|MSR_DE;
break;
- case BOOKE_INTERRUPT_DECREMENTER:
- r = vcpu->arch.msr & MSR_EE;
+ case BOOKE_IRQPRIO_CRITICAL:
+ case BOOKE_IRQPRIO_WATCHDOG:
+ allowed = vcpu->arch.msr & MSR_CE;
+ msr_mask = MSR_ME;
break;
- case BOOKE_INTERRUPT_FIT:
- r = vcpu->arch.msr & MSR_EE;
+ case BOOKE_IRQPRIO_MACHINE_CHECK:
+ allowed = vcpu->arch.msr & MSR_ME;
+ msr_mask = 0;
break;
- case BOOKE_INTERRUPT_WATCHDOG:
- r = vcpu->arch.msr & MSR_CE;
+ case BOOKE_IRQPRIO_EXTERNAL:
+ case BOOKE_IRQPRIO_DECREMENTER:
+ case BOOKE_IRQPRIO_FIT:
+ allowed = vcpu->arch.msr & MSR_EE;
+ msr_mask = MSR_CE|MSR_ME|MSR_DE;
break;
- case BOOKE_INTERRUPT_DEBUG:
- r = vcpu->arch.msr & MSR_DE;
+ case BOOKE_IRQPRIO_DEBUG:
+ allowed = vcpu->arch.msr & MSR_DE;
+ msr_mask = MSR_ME;
break;
- default:
- r = 1;
}
- return r;
-}
+ if (allowed) {
+ vcpu->arch.srr0 = vcpu->arch.pc;
+ vcpu->arch.srr1 = vcpu->arch.msr;
+ vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
+ kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);
-static void kvmppc_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
-{
- switch (interrupt) {
- case BOOKE_INTERRUPT_DECREMENTER:
- vcpu->arch.tsr |= TSR_DIS;
- break;
+ clear_bit(priority, &vcpu->arch.pending_exceptions);
}
- vcpu->arch.srr0 = vcpu->arch.pc;
- vcpu->arch.srr1 = vcpu->arch.msr;
- vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[interrupt];
- kvmppc_set_msr(vcpu, vcpu->arch.msr & interrupt_msr_mask[interrupt]);
+ return allowed;
}
/* Check pending exceptions and deliver one, if possible. */
-void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu)
+void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
unsigned long *pending = &vcpu->arch.pending_exceptions;
- unsigned int exception;
unsigned int priority;
- priority = find_first_bit(pending, BITS_PER_BYTE * sizeof(*pending));
+ priority = __ffs(*pending);
while (priority <= BOOKE_MAX_INTERRUPT) {
- exception = priority_exception[priority];
- if (kvmppc_can_deliver_interrupt(vcpu, exception)) {
- kvmppc_clear_exception(vcpu, exception);
- kvmppc_deliver_interrupt(vcpu, exception);
+ if (kvmppc_booke_irqprio_deliver(vcpu, priority))
break;
- }
priority = find_next_bit(pending,
BITS_PER_BYTE * sizeof(*pending),
@@ -238,6 +186,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
enum emulation_result er;
int r = RESUME_HOST;
+ /* update before a new last_exit_type is rewritten */
+ kvmppc_update_timing_stats(vcpu);
+
local_irq_enable();
run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -251,21 +202,19 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case BOOKE_INTERRUPT_EXTERNAL:
+ kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
+ if (need_resched())
+ cond_resched();
+ r = RESUME_GUEST;
+ break;
+
case BOOKE_INTERRUPT_DECREMENTER:
/* Since we switched IVPR back to the host's value, the host
* handled this interrupt the moment we enabled interrupts.
* Now we just offer it a chance to reschedule the guest. */
-
- /* XXX At this point the TLB still holds our shadow TLB, so if
- * we do reschedule the host will fault over it. Perhaps we
- * should politely restore the host's entries to minimize
- * misses before ceding control. */
+ kvmppc_account_exit(vcpu, DEC_EXITS);
if (need_resched())
cond_resched();
- if (exit_nr == BOOKE_INTERRUPT_DECREMENTER)
- vcpu->stat.dec_exits++;
- else
- vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST;
break;
@@ -274,17 +223,19 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* Program traps generated by user-level software must be handled
* by the guest kernel. */
vcpu->arch.esr = vcpu->arch.fault_esr;
- kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
r = RESUME_GUEST;
+ kvmppc_account_exit(vcpu, USR_PR_INST);
break;
}
er = kvmppc_emulate_instruction(run, vcpu);
switch (er) {
case EMULATE_DONE:
+ /* don't overwrite subtypes, just account kvm_stats */
+ kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
/* Future optimization: only reload non-volatiles if
* they were actually modified by emulation. */
- vcpu->stat.emulated_inst_exits++;
r = RESUME_GUEST_NV;
break;
case EMULATE_DO_DCR:
@@ -293,7 +244,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case EMULATE_FAIL:
/* XXX Deliver Program interrupt to guest. */
- printk(KERN_CRIT "%s: emulation at %x failed (%08x)\n",
+ printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
__func__, vcpu->arch.pc, vcpu->arch.last_inst);
/* For debugging, encode the failing instruction and
* report it to userspace. */
@@ -307,48 +258,53 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case BOOKE_INTERRUPT_FP_UNAVAIL:
- kvmppc_queue_exception(vcpu, exit_nr);
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
+ kvmppc_account_exit(vcpu, FP_UNAVAIL);
r = RESUME_GUEST;
break;
case BOOKE_INTERRUPT_DATA_STORAGE:
vcpu->arch.dear = vcpu->arch.fault_dear;
vcpu->arch.esr = vcpu->arch.fault_esr;
- kvmppc_queue_exception(vcpu, exit_nr);
- vcpu->stat.dsi_exits++;
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
+ kvmppc_account_exit(vcpu, DSI_EXITS);
r = RESUME_GUEST;
break;
case BOOKE_INTERRUPT_INST_STORAGE:
vcpu->arch.esr = vcpu->arch.fault_esr;
- kvmppc_queue_exception(vcpu, exit_nr);
- vcpu->stat.isi_exits++;
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
+ kvmppc_account_exit(vcpu, ISI_EXITS);
r = RESUME_GUEST;
break;
case BOOKE_INTERRUPT_SYSCALL:
- kvmppc_queue_exception(vcpu, exit_nr);
- vcpu->stat.syscall_exits++;
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
+ kvmppc_account_exit(vcpu, SYSCALL_EXITS);
r = RESUME_GUEST;
break;
+ /* XXX move to a 440-specific file. */
case BOOKE_INTERRUPT_DTLB_MISS: {
- struct tlbe *gtlbe;
+ struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+ struct kvmppc_44x_tlbe *gtlbe;
unsigned long eaddr = vcpu->arch.fault_dear;
+ int gtlb_index;
gfn_t gfn;
/* Check the guest TLB. */
- gtlbe = kvmppc_44x_dtlb_search(vcpu, eaddr);
- if (!gtlbe) {
+ gtlb_index = kvmppc_44x_dtlb_index(vcpu, eaddr);
+ if (gtlb_index < 0) {
/* The guest didn't have a mapping for it. */
- kvmppc_queue_exception(vcpu, exit_nr);
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
vcpu->arch.dear = vcpu->arch.fault_dear;
vcpu->arch.esr = vcpu->arch.fault_esr;
- vcpu->stat.dtlb_real_miss_exits++;
+ kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
r = RESUME_GUEST;
break;
}
+ gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr);
gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT;
@@ -359,38 +315,45 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* b) the guest used a large mapping which we're faking
* Either way, we need to satisfy the fault without
* invoking the guest. */
- kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
- gtlbe->word2);
- vcpu->stat.dtlb_virt_miss_exits++;
+ kvmppc_mmu_map(vcpu, eaddr, vcpu->arch.paddr_accessed, gtlbe->tid,
+ gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
+ kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
r = RESUME_GUEST;
} else {
/* Guest has mapped and accessed a page which is not
* actually RAM. */
r = kvmppc_emulate_mmio(run, vcpu);
+ kvmppc_account_exit(vcpu, MMIO_EXITS);
}
break;
}
+ /* XXX move to a 440-specific file. */
case BOOKE_INTERRUPT_ITLB_MISS: {
- struct tlbe *gtlbe;
+ struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+ struct kvmppc_44x_tlbe *gtlbe;
unsigned long eaddr = vcpu->arch.pc;
+ gpa_t gpaddr;
gfn_t gfn;
+ int gtlb_index;
r = RESUME_GUEST;
/* Check the guest TLB. */
- gtlbe = kvmppc_44x_itlb_search(vcpu, eaddr);
- if (!gtlbe) {
+ gtlb_index = kvmppc_44x_itlb_index(vcpu, eaddr);
+ if (gtlb_index < 0) {
/* The guest didn't have a mapping for it. */
- kvmppc_queue_exception(vcpu, exit_nr);
- vcpu->stat.itlb_real_miss_exits++;
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
+ kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
break;
}
- vcpu->stat.itlb_virt_miss_exits++;
+ kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
- gfn = tlb_xlate(gtlbe, eaddr) >> PAGE_SHIFT;
+ gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
+ gpaddr = tlb_xlate(gtlbe, eaddr);
+ gfn = gpaddr >> PAGE_SHIFT;
if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
/* The guest TLB had a mapping, but the shadow TLB
@@ -399,12 +362,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* b) the guest used a large mapping which we're faking
* Either way, we need to satisfy the fault without
* invoking the guest. */
- kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
- gtlbe->word2);
+ kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid,
+ gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
} else {
/* Guest mapped and leaped at non-RAM! */
- kvmppc_queue_exception(vcpu,
- BOOKE_INTERRUPT_MACHINE_CHECK);
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
}
break;
@@ -421,6 +383,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
mtspr(SPRN_DBSR, dbsr);
run->exit_reason = KVM_EXIT_DEBUG;
+ kvmppc_account_exit(vcpu, DEBUG_EXITS);
r = RESUME_HOST;
break;
}
@@ -432,10 +395,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
local_irq_disable();
- kvmppc_check_and_deliver_interrupts(vcpu);
+ kvmppc_core_deliver_interrupts(vcpu);
- /* Do some exit accounting. */
- vcpu->stat.sum_exits++;
if (!(r & RESUME_HOST)) {
/* To avoid clobbering exit_reason, only check for signals if
* we aren't already exiting to userspace for some other
@@ -443,22 +404,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (signal_pending(current)) {
run->exit_reason = KVM_EXIT_INTR;
r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
-
- vcpu->stat.signal_exits++;
- } else {
- vcpu->stat.light_exits++;
- }
- } else {
- switch (run->exit_reason) {
- case KVM_EXIT_MMIO:
- vcpu->stat.mmio_exits++;
- break;
- case KVM_EXIT_DCR:
- vcpu->stat.dcr_exits++;
- break;
- case KVM_EXIT_INTR:
- vcpu->stat.signal_exits++;
- break;
+ kvmppc_account_exit(vcpu, SIGNAL_EXITS);
}
}
@@ -468,20 +414,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
- struct tlbe *tlbe = &vcpu->arch.guest_tlb[0];
-
- tlbe->tid = 0;
- tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
- tlbe->word1 = 0;
- tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;
-
- tlbe++;
- tlbe->tid = 0;
- tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID;
- tlbe->word1 = 0xef600000;
- tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR
- | PPC44x_TLB_I | PPC44x_TLB_G;
-
vcpu->arch.pc = 0;
vcpu->arch.msr = 0;
vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */
@@ -492,12 +424,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
* before it's programmed its own IVPR. */
vcpu->arch.ivpr = 0x55550000;
- /* Since the guest can directly access the timebase, it must know the
- * real timebase frequency. Accordingly, it must see the state of
- * CCR1[TCS]. */
- vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
+ kvmppc_init_timing_stats(vcpu);
- return 0;
+ return kvmppc_core_vcpu_setup(vcpu);
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
@@ -536,7 +465,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu->arch.ctr = regs->ctr;
vcpu->arch.lr = regs->lr;
vcpu->arch.xer = regs->xer;
- vcpu->arch.msr = regs->msr;
+ kvmppc_set_msr(vcpu, regs->msr);
vcpu->arch.srr0 = regs->srr0;
vcpu->arch.srr1 = regs->srr1;
vcpu->arch.sprg0 = regs->sprg0;
@@ -575,31 +504,62 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
return -ENOTSUPP;
}
-/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
- struct tlbe *gtlbe;
- int index;
- gva_t eaddr;
- u8 pid;
- u8 as;
-
- eaddr = tr->linear_address;
- pid = (tr->linear_address >> 32) & 0xff;
- as = (tr->linear_address >> 40) & 0x1;
-
- index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);
- if (index == -1) {
- tr->valid = 0;
- return 0;
- }
+ return kvmppc_core_vcpu_translate(vcpu, tr);
+}
- gtlbe = &vcpu->arch.guest_tlb[index];
+int kvmppc_booke_init(void)
+{
+ unsigned long ivor[16];
+ unsigned long max_ivor = 0;
+ int i;
- tr->physical_address = tlb_xlate(gtlbe, eaddr);
- /* XXX what does "writeable" and "usermode" even mean? */
- tr->valid = 1;
+ /* We install our own exception handlers by hijacking IVPR. IVPR's low
+ * 16 bits are ignored, so the handler base must be 64KB aligned; hence
+ * the 64KB allocation. */
+ kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ VCPU_SIZE_ORDER);
+ if (!kvmppc_booke_handlers)
+ return -ENOMEM;
+
+ /* XXX make sure our handlers are smaller than Linux's */
+
+ /* Copy our interrupt handlers to match host IVORs. That way we don't
+ * have to swap the IVORs on every guest/host transition. */
+ ivor[0] = mfspr(SPRN_IVOR0);
+ ivor[1] = mfspr(SPRN_IVOR1);
+ ivor[2] = mfspr(SPRN_IVOR2);
+ ivor[3] = mfspr(SPRN_IVOR3);
+ ivor[4] = mfspr(SPRN_IVOR4);
+ ivor[5] = mfspr(SPRN_IVOR5);
+ ivor[6] = mfspr(SPRN_IVOR6);
+ ivor[7] = mfspr(SPRN_IVOR7);
+ ivor[8] = mfspr(SPRN_IVOR8);
+ ivor[9] = mfspr(SPRN_IVOR9);
+ ivor[10] = mfspr(SPRN_IVOR10);
+ ivor[11] = mfspr(SPRN_IVOR11);
+ ivor[12] = mfspr(SPRN_IVOR12);
+ ivor[13] = mfspr(SPRN_IVOR13);
+ ivor[14] = mfspr(SPRN_IVOR14);
+ ivor[15] = mfspr(SPRN_IVOR15);
+
+ for (i = 0; i < 16; i++) {
+ if (ivor[i] > max_ivor)
+ max_ivor = ivor[i];
+
+ memcpy((void *)kvmppc_booke_handlers + ivor[i],
+ kvmppc_handlers_start + i * kvmppc_handler_len,
+ kvmppc_handler_len);
+ }
+ flush_icache_range(kvmppc_booke_handlers,
+ kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
return 0;
}
+
+void __exit kvmppc_booke_exit(void)
+{
+ free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
+ kvm_exit();
+}
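
The fixed dispatch used above, both when the guest vector is computed (pc = ivpr | ivor[priority]) and when the host IVOR offsets are reused to place KVM's own handlers, is base-plus-offset address math inside one 64KB-aligned region. A hedged sketch of that math, with an invented IVOR offset:

#include <stdio.h>

/* Book E vector dispatch as used above: IVPR supplies the upper address
 * bits of a 64KB-aligned region and each IVOR supplies an offset into it.
 * The same layout is why the init code copies one handler per host IVOR
 * offset into the kvmppc_booke_handlers area. */
static unsigned long vector_address(unsigned long ivpr, unsigned long ivor)
{
	return ivpr | ivor;
}

int main(void)
{
	unsigned long ivpr = 0x55550000;	/* dummy reset value set above */
	unsigned long ivor = 0x0100;		/* invented offset, for illustration */

	printf("vector: 0x%lx\n", vector_address(ivpr, ivor));
	return 0;
}
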
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
new file mode 100644
index 000000000000..cf7c94ca24bf
--- /dev/null
+++ b/arch/powerpc/kvm/booke.h
@@ -0,0 +1,60 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __KVM_BOOKE_H__
+#define __KVM_BOOKE_H__
+
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+#include "timing.h"
+
+/* interrupt priority ordering */
+#define BOOKE_IRQPRIO_DATA_STORAGE 0
+#define BOOKE_IRQPRIO_INST_STORAGE 1
+#define BOOKE_IRQPRIO_ALIGNMENT 2
+#define BOOKE_IRQPRIO_PROGRAM 3
+#define BOOKE_IRQPRIO_FP_UNAVAIL 4
+#define BOOKE_IRQPRIO_SYSCALL 5
+#define BOOKE_IRQPRIO_AP_UNAVAIL 6
+#define BOOKE_IRQPRIO_DTLB_MISS 7
+#define BOOKE_IRQPRIO_ITLB_MISS 8
+#define BOOKE_IRQPRIO_MACHINE_CHECK 9
+#define BOOKE_IRQPRIO_DEBUG 10
+#define BOOKE_IRQPRIO_CRITICAL 11
+#define BOOKE_IRQPRIO_WATCHDOG 12
+#define BOOKE_IRQPRIO_EXTERNAL 13
+#define BOOKE_IRQPRIO_FIT 14
+#define BOOKE_IRQPRIO_DECREMENTER 15
+
+/* Helper function for "full" MSR writes. No need to call this if only EE is
+ * changing. */
+static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+{
+ if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
+ kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
+
+ vcpu->arch.msr = new_msr;
+
+ if (vcpu->arch.msr & MSR_WE) {
+ kvm_vcpu_block(vcpu);
+ kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
+ }
+}
+
+#endif /* __KVM_BOOKE_H__ */
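
The note on kvmppc_set_msr() ("no need to call this if only EE is changing") mirrors how the removed 44x emulation handled wrtee/wrteei: it masked MSR[EE] in place instead of taking the full helper path. A simplified sketch of the two kinds of MSR update, with invented types and bit values:

/* Simplified sketch; the struct and bit value are invented stand-ins. */
#define TOY_MSR_EE 0x8000UL

struct toy_vcpu { unsigned long msr; };

/* wrtee-style update: only EE changes, so neither the privilege switch
 * nor the MSR[WE] wait-state check is needed and the full helper can
 * be skipped, as the comment above says. */
void set_ee_only(struct toy_vcpu *vcpu, unsigned long src)
{
	vcpu->msr = (vcpu->msr & ~TOY_MSR_EE) | (src & TOY_MSR_EE);
}

/* mtmsr-style update: any bit may change, so the full helper is used
 * (kvmppc_set_msr() above additionally handles MSR[PR] and MSR[WE]). */
void set_full_msr(struct toy_vcpu *vcpu, unsigned long new_msr)
{
	vcpu->msr = new_msr;
}
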
diff --git a/arch/powerpc/kvm/booke_host.c b/arch/powerpc/kvm/booke_host.c
deleted file mode 100644
index b480341bc31e..000000000000
--- a/arch/powerpc/kvm/booke_host.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
- * Copyright IBM Corp. 2008
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- */
-
-#include <linux/errno.h>
-#include <linux/kvm_host.h>
-#include <linux/module.h>
-#include <asm/cacheflush.h>
-#include <asm/kvm_ppc.h>
-
-unsigned long kvmppc_booke_handlers;
-
-static int kvmppc_booke_init(void)
-{
- unsigned long ivor[16];
- unsigned long max_ivor = 0;
- int i;
-
- /* We install our own exception handlers by hijacking IVPR. IVPR must
- * be 16-bit aligned, so we need a 64KB allocation. */
- kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
- VCPU_SIZE_ORDER);
- if (!kvmppc_booke_handlers)
- return -ENOMEM;
-
- /* XXX make sure our handlers are smaller than Linux's */
-
- /* Copy our interrupt handlers to match host IVORs. That way we don't
- * have to swap the IVORs on every guest/host transition. */
- ivor[0] = mfspr(SPRN_IVOR0);
- ivor[1] = mfspr(SPRN_IVOR1);
- ivor[2] = mfspr(SPRN_IVOR2);
- ivor[3] = mfspr(SPRN_IVOR3);
- ivor[4] = mfspr(SPRN_IVOR4);
- ivor[5] = mfspr(SPRN_IVOR5);
- ivor[6] = mfspr(SPRN_IVOR6);
- ivor[7] = mfspr(SPRN_IVOR7);
- ivor[8] = mfspr(SPRN_IVOR8);
- ivor[9] = mfspr(SPRN_IVOR9);
- ivor[10] = mfspr(SPRN_IVOR10);
- ivor[11] = mfspr(SPRN_IVOR11);
- ivor[12] = mfspr(SPRN_IVOR12);
- ivor[13] = mfspr(SPRN_IVOR13);
- ivor[14] = mfspr(SPRN_IVOR14);
- ivor[15] = mfspr(SPRN_IVOR15);
-
- for (i = 0; i < 16; i++) {
- if (ivor[i] > max_ivor)
- max_ivor = ivor[i];
-
- memcpy((void *)kvmppc_booke_handlers + ivor[i],
- kvmppc_handlers_start + i * kvmppc_handler_len,
- kvmppc_handler_len);
- }
- flush_icache_range(kvmppc_booke_handlers,
- kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
-
- return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
-}
-
-static void __exit kvmppc_booke_exit(void)
-{
- free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
- kvm_exit();
-}
-
-module_init(kvmppc_booke_init)
-module_exit(kvmppc_booke_exit)
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 95e165baf85f..084ebcd7dd83 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -107,6 +107,18 @@ _GLOBAL(kvmppc_resume_host)
li r6, 1
slw r6, r6, r5
+#ifdef CONFIG_KVM_EXIT_TIMING
+ /* save exit time */
+1:
+ mfspr r7, SPRN_TBRU
+ mfspr r8, SPRN_TBRL
+ mfspr r9, SPRN_TBRU
+ cmpw r9, r7
+ bne 1b
+ stw r8, VCPU_TIMING_EXIT_TBL(r4)
+ stw r9, VCPU_TIMING_EXIT_TBU(r4)
+#endif
+
/* Save the faulting instruction and all GPRs for emulation. */
andi. r7, r6, NEED_INST_MASK
beq ..skip_inst_copy
@@ -335,54 +347,6 @@ lightweight_exit:
lwz r3, VCPU_SHADOW_PID(r4)
mtspr SPRN_PID, r3
- /* Prevent all asynchronous TLB updates. */
- mfmsr r5
- lis r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
- ori r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
- andc r6, r5, r6
- mtmsr r6
-
- /* Load the guest mappings, leaving the host's "pinned" kernel mappings
- * in place. */
- mfspr r10, SPRN_MMUCR /* Save host MMUCR. */
- li r5, PPC44x_TLB_SIZE
- lis r5, tlb_44x_hwater@ha
- lwz r5, tlb_44x_hwater@l(r5)
- mtctr r5
- addi r9, r4, VCPU_SHADOW_TLB
- addi r5, r4, VCPU_SHADOW_MOD
- li r3, 0
-1:
- lbzx r7, r3, r5
- cmpwi r7, 0
- beq 3f
-
- /* Load guest entry. */
- mulli r11, r3, TLBE_BYTES
- add r11, r11, r9
- lwz r7, 0(r11)
- mtspr SPRN_MMUCR, r7
- lwz r7, 4(r11)
- tlbwe r7, r3, PPC44x_TLB_PAGEID
- lwz r7, 8(r11)
- tlbwe r7, r3, PPC44x_TLB_XLAT
- lwz r7, 12(r11)
- tlbwe r7, r3, PPC44x_TLB_ATTRIB
-3:
- addi r3, r3, 1 /* Increment index. */
- bdnz 1b
-
- mtspr SPRN_MMUCR, r10 /* Restore host MMUCR. */
-
- /* Clear bitmap of modified TLB entries */
- li r5, PPC44x_TLB_SIZE>>2
- mtctr r5
- addi r5, r4, VCPU_SHADOW_MOD - 4
- li r6, 0
-1:
- stwu r6, 4(r5)
- bdnz 1b
-
iccci 0, 0 /* XXX hack */
/* Load some guest volatiles. */
@@ -423,6 +387,18 @@ lightweight_exit:
lwz r3, VCPU_SPRG7(r4)
mtspr SPRN_SPRG7, r3
+#ifdef CONFIG_KVM_EXIT_TIMING
+ /* save enter time */
+1:
+ mfspr r6, SPRN_TBRU
+ mfspr r7, SPRN_TBRL
+ mfspr r8, SPRN_TBRU
+ cmpw r8, r6
+ bne 1b
+ stw r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
+ stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
+#endif
+
/* Finish loading guest volatiles and jump to guest. */
lwz r3, VCPU_CTR(r4)
mtctr r3
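
Both timing stamps added above use the standard split-timebase read: sample TBU, then TBL, then TBU again, and retry if the upper half changed, so a carry between the two 32-bit halves can never produce a torn 64-bit value. The same loop in C, with placeholder register readers:

#include <stdint.h>

/* Placeholders for mfspr SPRN_TBRU / SPRN_TBRL; on real hardware these
 * would be inline-asm reads of the timebase registers. */
extern uint32_t read_tbu(void);
extern uint32_t read_tbl(void);

/* Torn-read-free 64-bit timebase from two 32-bit halves: if TBU changed
 * while TBL was read, a carry occurred in between, so try again. */
uint64_t read_timebase64(void)
{
	uint32_t hi, lo, hi2;

	do {
		hi  = read_tbu();
		lo  = read_tbl();
		hi2 = read_tbu();
	} while (hi != hi2);

	return ((uint64_t)hi << 32) | lo;
}
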
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 0fce4fbdc20d..d1d38daa93fb 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -23,161 +23,14 @@
#include <linux/string.h>
#include <linux/kvm_host.h>
-#include <asm/dcr.h>
-#include <asm/dcr-regs.h>
+#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
+#include <asm/disassemble.h>
+#include "timing.h"
-#include "44x_tlb.h"
-
-/* Instruction decoding */
-static inline unsigned int get_op(u32 inst)
-{
- return inst >> 26;
-}
-
-static inline unsigned int get_xop(u32 inst)
-{
- return (inst >> 1) & 0x3ff;
-}
-
-static inline unsigned int get_sprn(u32 inst)
-{
- return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
-}
-
-static inline unsigned int get_dcrn(u32 inst)
-{
- return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
-}
-
-static inline unsigned int get_rt(u32 inst)
-{
- return (inst >> 21) & 0x1f;
-}
-
-static inline unsigned int get_rs(u32 inst)
-{
- return (inst >> 21) & 0x1f;
-}
-
-static inline unsigned int get_ra(u32 inst)
-{
- return (inst >> 16) & 0x1f;
-}
-
-static inline unsigned int get_rb(u32 inst)
-{
- return (inst >> 11) & 0x1f;
-}
-
-static inline unsigned int get_rc(u32 inst)
-{
- return inst & 0x1;
-}
-
-static inline unsigned int get_ws(u32 inst)
-{
- return (inst >> 11) & 0x1f;
-}
-
-static inline unsigned int get_d(u32 inst)
-{
- return inst & 0xffff;
-}
-
-static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
- const struct tlbe *tlbe)
-{
- gpa_t gpa;
-
- if (!get_tlb_v(tlbe))
- return 0;
-
- /* Does it match current guest AS? */
- /* XXX what about IS != DS? */
- if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
- return 0;
-
- gpa = get_tlb_raddr(tlbe);
- if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
- /* Mapping is not for RAM. */
- return 0;
-
- return 1;
-}
-
-static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst)
-{
- u64 eaddr;
- u64 raddr;
- u64 asid;
- u32 flags;
- struct tlbe *tlbe;
- unsigned int ra;
- unsigned int rs;
- unsigned int ws;
- unsigned int index;
-
- ra = get_ra(inst);
- rs = get_rs(inst);
- ws = get_ws(inst);
-
- index = vcpu->arch.gpr[ra];
- if (index > PPC44x_TLB_SIZE) {
- printk("%s: index %d\n", __func__, index);
- kvmppc_dump_vcpu(vcpu);
- return EMULATE_FAIL;
- }
-
- tlbe = &vcpu->arch.guest_tlb[index];
-
- /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
- if (tlbe->word0 & PPC44x_TLB_VALID) {
- eaddr = get_tlb_eaddr(tlbe);
- asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
- kvmppc_mmu_invalidate(vcpu, eaddr, get_tlb_end(tlbe), asid);
- }
-
- switch (ws) {
- case PPC44x_TLB_PAGEID:
- tlbe->tid = vcpu->arch.mmucr & 0xff;
- tlbe->word0 = vcpu->arch.gpr[rs];
- break;
-
- case PPC44x_TLB_XLAT:
- tlbe->word1 = vcpu->arch.gpr[rs];
- break;
-
- case PPC44x_TLB_ATTRIB:
- tlbe->word2 = vcpu->arch.gpr[rs];
- break;
-
- default:
- return EMULATE_FAIL;
- }
-
- if (tlbe_is_host_safe(vcpu, tlbe)) {
- eaddr = get_tlb_eaddr(tlbe);
- raddr = get_tlb_raddr(tlbe);
- asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
- flags = tlbe->word2 & 0xffff;
-
- /* Create a 4KB mapping on the host. If the guest wanted a
- * large page, only the first 4KB is mapped here and the rest
- * are mapped on the fly. */
- kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags);
- }
-
- KVMTRACE_5D(GTLB_WRITE, vcpu, index,
- tlbe->tid, tlbe->word0, tlbe->word1, tlbe->word2,
- handler);
-
- return EMULATE_DONE;
-}
-
-static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
+void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.tcr & TCR_DIE) {
/* The decrementer ticks at the same rate as the timebase, so
@@ -193,12 +46,6 @@ static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
}
}
-static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.pc = vcpu->arch.srr0;
- kvmppc_set_msr(vcpu, vcpu->arch.srr1);
-}
-
/* XXX to do:
* lhax
* lhaux
@@ -213,40 +60,30 @@ static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
*
* XXX is_bigendian should depend on MMU mapping or MSR[LE]
*/
+/* XXX Should probably auto-generate instruction decoding for a particular core
+ * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
u32 inst = vcpu->arch.last_inst;
u32 ea;
int ra;
int rb;
- int rc;
int rs;
int rt;
int sprn;
- int dcrn;
enum emulation_result emulated = EMULATE_DONE;
int advance = 1;
+ /* this default type might be overwritten by subcategories */
+ kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
+
switch (get_op(inst)) {
- case 3: /* trap */
- printk("trap!\n");
- kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
+ case 3: /* trap */
+ vcpu->arch.esr |= ESR_PTR;
+ kvmppc_core_queue_program(vcpu);
advance = 0;
break;
- case 19:
- switch (get_xop(inst)) {
- case 50: /* rfi */
- kvmppc_emul_rfi(vcpu);
- advance = 0;
- break;
-
- default:
- emulated = EMULATE_FAIL;
- break;
- }
- break;
-
case 31:
switch (get_xop(inst)) {
@@ -255,27 +92,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break;
- case 83: /* mfmsr */
- rt = get_rt(inst);
- vcpu->arch.gpr[rt] = vcpu->arch.msr;
- break;
-
case 87: /* lbzx */
rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
break;
- case 131: /* wrtee */
- rs = get_rs(inst);
- vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
- | (vcpu->arch.gpr[rs] & MSR_EE);
- break;
-
- case 146: /* mtmsr */
- rs = get_rs(inst);
- kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
- break;
-
case 151: /* stwx */
rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
@@ -283,11 +104,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
4, 1);
break;
- case 163: /* wrteei */
- vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
- | (inst & MSR_EE);
- break;
-
case 215: /* stbx */
rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
@@ -328,42 +144,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->arch.gpr[ra] = ea;
break;
- case 323: /* mfdcr */
- dcrn = get_dcrn(inst);
- rt = get_rt(inst);
-
- /* The guest may access CPR0 registers to determine the timebase
- * frequency, and it must know the real host frequency because it
- * can directly access the timebase registers.
- *
- * It would be possible to emulate those accesses in userspace,
- * but userspace can really only figure out the end frequency.
- * We could decompose that into the factors that compute it, but
- * that's tricky math, and it's easier to just report the real
- * CPR0 values.
- */
- switch (dcrn) {
- case DCRN_CPR0_CONFIG_ADDR:
- vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
- break;
- case DCRN_CPR0_CONFIG_DATA:
- local_irq_disable();
- mtdcr(DCRN_CPR0_CONFIG_ADDR,
- vcpu->arch.cpr0_cfgaddr);
- vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
- local_irq_enable();
- break;
- default:
- run->dcr.dcrn = dcrn;
- run->dcr.data = 0;
- run->dcr.is_write = 0;
- vcpu->arch.io_gpr = rt;
- vcpu->arch.dcr_needed = 1;
- emulated = EMULATE_DO_DCR;
- }
-
- break;
-
case 339: /* mfspr */
sprn = get_sprn(inst);
rt = get_rt(inst);
@@ -373,26 +153,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
case SPRN_SRR1:
vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
- case SPRN_MMUCR:
- vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
- case SPRN_PID:
- vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
- case SPRN_IVPR:
- vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
- case SPRN_CCR0:
- vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
- case SPRN_CCR1:
- vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
case SPRN_PVR:
vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
- case SPRN_DEAR:
- vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
- case SPRN_ESR:
- vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
- case SPRN_DBCR0:
- vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
- case SPRN_DBCR1:
- vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
/* Note: mftb and TBRL/TBWL are user-accessible, so
* the guest can always access the real TB anyways.
@@ -413,42 +175,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Note: SPRG4-7 are user-readable, so we don't get
* a trap. */
- case SPRN_IVOR0:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[0]; break;
- case SPRN_IVOR1:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[1]; break;
- case SPRN_IVOR2:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[2]; break;
- case SPRN_IVOR3:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[3]; break;
- case SPRN_IVOR4:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[4]; break;
- case SPRN_IVOR5:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[5]; break;
- case SPRN_IVOR6:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[6]; break;
- case SPRN_IVOR7:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[7]; break;
- case SPRN_IVOR8:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[8]; break;
- case SPRN_IVOR9:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[9]; break;
- case SPRN_IVOR10:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[10]; break;
- case SPRN_IVOR11:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[11]; break;
- case SPRN_IVOR12:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[12]; break;
- case SPRN_IVOR13:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[13]; break;
- case SPRN_IVOR14:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[14]; break;
- case SPRN_IVOR15:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[15]; break;
-
default:
- printk("mfspr: unknown spr %x\n", sprn);
- vcpu->arch.gpr[rt] = 0;
+ emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
+ if (emulated == EMULATE_FAIL) {
+ printk("mfspr: unknown spr %x\n", sprn);
+ vcpu->arch.gpr[rt] = 0;
+ }
break;
}
break;
@@ -478,25 +210,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->arch.gpr[ra] = ea;
break;
- case 451: /* mtdcr */
- dcrn = get_dcrn(inst);
- rs = get_rs(inst);
-
- /* emulate some access in kernel */
- switch (dcrn) {
- case DCRN_CPR0_CONFIG_ADDR:
- vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
- break;
- default:
- run->dcr.dcrn = dcrn;
- run->dcr.data = vcpu->arch.gpr[rs];
- run->dcr.is_write = 1;
- vcpu->arch.dcr_needed = 1;
- emulated = EMULATE_DO_DCR;
- }
-
- break;
-
case 467: /* mtspr */
sprn = get_sprn(inst);
rs = get_rs(inst);
@@ -505,22 +218,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
case SPRN_SRR1:
vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
- case SPRN_MMUCR:
- vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
- case SPRN_PID:
- kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
- case SPRN_CCR0:
- vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
- case SPRN_CCR1:
- vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
- case SPRN_DEAR:
- vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
- case SPRN_ESR:
- vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
- case SPRN_DBCR0:
- vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
- case SPRN_DBCR1:
- vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
/* XXX We need to context-switch the timebase for
* watchdog and FIT. */
@@ -532,14 +229,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvmppc_emulate_dec(vcpu);
break;
- case SPRN_TSR:
- vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
-
- case SPRN_TCR:
- vcpu->arch.tcr = vcpu->arch.gpr[rs];
- kvmppc_emulate_dec(vcpu);
- break;
-
case SPRN_SPRG0:
vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
case SPRN_SPRG1:
@@ -549,56 +238,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case SPRN_SPRG3:
vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
- /* Note: SPRG4-7 are user-readable. These values are
- * loaded into the real SPRGs when resuming the
- * guest. */
- case SPRN_SPRG4:
- vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
- case SPRN_SPRG5:
- vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
- case SPRN_SPRG6:
- vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
- case SPRN_SPRG7:
- vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
-
- case SPRN_IVPR:
- vcpu->arch.ivpr = vcpu->arch.gpr[rs]; break;
- case SPRN_IVOR0:
- vcpu->arch.ivor[0] = vcpu->arch.gpr[rs]; break;
- case SPRN_IVOR1:
- vcpu->arch.ivor[1] = vcpu->arch.gpr[rs]; break;
- case SPRN_IVOR2:
- vcpu->arch.ivor[2] = vcpu->arch.gpr[rs]; break;
- case SPRN_IVOR3:
- vcpu->arch.ivor[3] = vcpu->arch.gpr[rs]; break;
- case SPRN_IVOR4:
- vcpu->arch.ivor[4] = vcpu->arch.gpr[rs]; break;
- case SPRN_IVOR5:
- vcpu->arch.ivor[5] = vcpu->arch.gpr[rs]; break;
- case SPRN_IVOR6:
- vcpu->arch.ivor[6] = vcpu->arch.gpr[rs]; break;
- case SPRN_IVOR7:
- vcpu->arch.ivor[7] = vcpu->arch.gpr[rs]; break;
- case SPRN_IVOR8:
- vcpu->arch.ivor[8] = vcpu->arch.gpr[rs]; break;
- case SPRN_IVOR9:
- vcpu->arch.ivor[9] = vcpu->arch.gpr[rs]; break;
- case SPRN_IVOR10:
- vcpu->arch.ivor[10] = vcpu->arch.gpr[rs]; break;
- case SPRN_IVOR11:
- vcpu->arch.ivor[11] = vcpu->arch.gpr[rs]; break;
- case SPRN_IVOR12:
- vcpu->arch.ivor[12] = vcpu->arch.gpr[rs]; break;
- case SPRN_IVOR13:
- vcpu->arch.ivor[13] = vcpu->arch.gpr[rs]; break;
- case SPRN_IVOR14:
- vcpu->arch.ivor[14] = vcpu->arch.gpr[rs]; break;
- case SPRN_IVOR15:
- vcpu->arch.ivor[15] = vcpu->arch.gpr[rs]; break;
-
default:
- printk("mtspr: unknown spr %x\n", sprn);
- emulated = EMULATE_FAIL;
+ emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
+ if (emulated == EMULATE_FAIL)
+ printk("mtspr: unknown spr %x\n", sprn);
break;
}
break;
@@ -629,36 +272,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
4, 0);
break;
- case 978: /* tlbwe */
- emulated = kvmppc_emul_tlbwe(vcpu, inst);
- break;
-
- case 914: { /* tlbsx */
- int index;
- unsigned int as = get_mmucr_sts(vcpu);
- unsigned int pid = get_mmucr_stid(vcpu);
-
- rt = get_rt(inst);
- ra = get_ra(inst);
- rb = get_rb(inst);
- rc = get_rc(inst);
-
- ea = vcpu->arch.gpr[rb];
- if (ra)
- ea += vcpu->arch.gpr[ra];
-
- index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
- if (rc) {
- if (index < 0)
- vcpu->arch.cr &= ~0x20000000;
- else
- vcpu->arch.cr |= 0x20000000;
- }
- vcpu->arch.gpr[rt] = index;
-
- }
- break;
-
case 790: /* lhbrx */
rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
@@ -674,14 +287,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
2, 0);
break;
- case 966: /* iccci */
- break;
-
default:
- printk("unknown: op %d xop %d\n", get_op(inst),
- get_xop(inst));
+ /* Attempt core-specific emulation below. */
emulated = EMULATE_FAIL;
- break;
}
break;
@@ -764,12 +372,19 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
break;
default:
- printk("unknown op %d\n", get_op(inst));
emulated = EMULATE_FAIL;
- break;
}
- KVMTRACE_3D(PPC_INSTR, vcpu, inst, vcpu->arch.pc, emulated, entryexit);
+ if (emulated == EMULATE_FAIL) {
+ emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
+ if (emulated == EMULATE_FAIL) {
+ advance = 0;
+ printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
+ "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
+ }
+ }
+
+ KVMTRACE_3D(PPC_INSTR, vcpu, inst, (int)vcpu->arch.pc, emulated, entryexit);
if (advance)
vcpu->arch.pc += 4; /* Advance past emulated instruction. */
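
The decode helpers this patch removes from emulate.c (and expects from asm/disassemble.h instead) are plain bit-field extractions from the 32-bit instruction word. A small stand-alone sketch using the same shifts and masks:

#include <stdio.h>
#include <stdint.h>

/* Same field extraction as the helpers removed above (now expected to
 * come from asm/disassemble.h). */
static unsigned int get_op(uint32_t inst)  { return inst >> 26; }
static unsigned int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }
static unsigned int get_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; }
static unsigned int get_ra(uint32_t inst)  { return (inst >> 16) & 0x1f; }
static unsigned int get_rb(uint32_t inst)  { return (inst >> 11) & 0x1f; }

int main(void)
{
	uint32_t inst = 0x7c6002a6;	/* example word; the decode is mechanical */

	printf("op=%u xop=%u rt=%u ra=%u rb=%u\n",
	       get_op(inst), get_xop(inst), get_rt(inst),
	       get_ra(inst), get_rb(inst));
	return 0;
}
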
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index fda9baada132..2822c8ccfaaf 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -28,7 +28,8 @@
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
-
+#include "timing.h"
+#include "../mm/mmu_decl.h"
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
@@ -98,14 +99,7 @@ void kvm_arch_hardware_unsetup(void)
void kvm_arch_check_processor_compat(void *rtn)
{
- int r;
-
- if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
- r = 0;
- else
- r = -ENOTSUPP;
-
- *(int *)rtn = r;
+ *(int *)rtn = kvmppc_core_check_processor_compat();
}
struct kvm *kvm_arch_create_vm(void)
@@ -143,9 +137,6 @@ int kvm_dev_ioctl_check_extension(long ext)
int r;
switch (ext) {
- case KVM_CAP_USER_MEMORY:
- r = 1;
- break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
@@ -178,30 +169,15 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
struct kvm_vcpu *vcpu;
- int err;
-
- vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
- if (!vcpu) {
- err = -ENOMEM;
- goto out;
- }
-
- err = kvm_vcpu_init(vcpu, kvm, id);
- if (err)
- goto free_vcpu;
-
+ vcpu = kvmppc_core_vcpu_create(kvm, id);
+ kvmppc_create_vcpu_debugfs(vcpu, id);
return vcpu;
-
-free_vcpu:
- kmem_cache_free(kvm_vcpu_cache, vcpu);
-out:
- return ERR_PTR(err);
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
- kvm_vcpu_uninit(vcpu);
- kmem_cache_free(kvm_vcpu_cache, vcpu);
+ kvmppc_remove_vcpu_debugfs(vcpu);
+ kvmppc_core_vcpu_free(vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -211,16 +187,14 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
- unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER];
-
- return test_bit(priority, &vcpu->arch.pending_exceptions);
+ return kvmppc_core_pending_dec(vcpu);
}
static void kvmppc_decrementer_func(unsigned long data)
{
struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
- kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);
+ kvmppc_core_queue_dec(vcpu);
if (waitqueue_active(&vcpu->wq)) {
wake_up_interruptible(&vcpu->wq);
@@ -241,96 +215,25 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
kvmppc_core_destroy_mmu(vcpu);
}
-/* Note: clearing MSR[DE] just means that the debug interrupt will not be
- * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
- * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
- * will be delivered as an "imprecise debug event" (which is indicated by
- * DBSR[IDE].
- */
-static void kvmppc_disable_debug_interrupts(void)
-{
- mtmsr(mfmsr() & ~MSR_DE);
-}
-
-static void kvmppc_restore_host_debug_state(struct kvm_vcpu *vcpu)
-{
- kvmppc_disable_debug_interrupts();
-
- mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
- mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
- mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
- mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
- mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
- mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
- mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
- mtmsr(vcpu->arch.host_msr);
-}
-
-static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu)
-{
- struct kvm_guest_debug *dbg = &vcpu->guest_debug;
- u32 dbcr0 = 0;
-
- vcpu->arch.host_msr = mfmsr();
- kvmppc_disable_debug_interrupts();
-
- /* Save host debug register state. */
- vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
- vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
- vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
- vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
- vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
- vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
- vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);
-
- /* set registers up for guest */
-
- if (dbg->bp[0]) {
- mtspr(SPRN_IAC1, dbg->bp[0]);
- dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
- }
- if (dbg->bp[1]) {
- mtspr(SPRN_IAC2, dbg->bp[1]);
- dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
- }
- if (dbg->bp[2]) {
- mtspr(SPRN_IAC3, dbg->bp[2]);
- dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
- }
- if (dbg->bp[3]) {
- mtspr(SPRN_IAC4, dbg->bp[3]);
- dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
- }
-
- mtspr(SPRN_DBCR0, dbcr0);
- mtspr(SPRN_DBCR1, 0);
- mtspr(SPRN_DBCR2, 0);
-}
-
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- int i;
-
if (vcpu->guest_debug.enabled)
- kvmppc_load_guest_debug_registers(vcpu);
+ kvmppc_core_load_guest_debugstate(vcpu);
- /* Mark every guest entry in the shadow TLB entry modified, so that they
- * will all be reloaded on the next vcpu run (instead of being
- * demand-faulted). */
- for (i = 0; i <= tlb_44x_hwater; i++)
- kvmppc_tlbe_set_modified(vcpu, i);
+ kvmppc_core_vcpu_load(vcpu, cpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
if (vcpu->guest_debug.enabled)
- kvmppc_restore_host_debug_state(vcpu);
+ kvmppc_core_load_host_debugstate(vcpu);
/* Don't leave guest TLB entries resident when being de-scheduled. */
/* XXX It would be nice to differentiate between heavyweight exit and
* sched_out here, since we could avoid the TLB flush for heavyweight
* exits. */
- _tlbia();
+ _tlbil_all();
+ kvmppc_core_vcpu_put(vcpu);
}
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
@@ -354,14 +257,14 @@ int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
struct kvm_run *run)
{
- u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
+ ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
*gpr = run->dcr.data;
}
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run)
{
- u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
+ ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
if (run->mmio.len > sizeof(*gpr)) {
printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
@@ -459,7 +362,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu->arch.dcr_needed = 0;
}
- kvmppc_check_and_deliver_interrupts(vcpu);
+ kvmppc_core_deliver_interrupts(vcpu);
local_irq_disable();
kvm_guest_enter();
@@ -477,7 +380,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
- kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);
+ kvmppc_core_queue_external(vcpu, irq);
if (waitqueue_active(&vcpu->wq)) {
wake_up_interruptible(&vcpu->wq);
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
new file mode 100644
index 000000000000..47ee603f558e
--- /dev/null
+++ b/arch/powerpc/kvm/timing.c
@@ -0,0 +1,239 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#include <asm/time.h>
+#include <asm-generic/div64.h>
+
+#include "timing.h"
+
+void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
+{
+ int i;
+
+ /* pause guest execution to avoid concurrent updates */
+ local_irq_disable();
+ mutex_lock(&vcpu->mutex);
+
+ vcpu->arch.last_exit_type = 0xDEAD;
+ for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
+ vcpu->arch.timing_count_type[i] = 0;
+ vcpu->arch.timing_max_duration[i] = 0;
+ vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;
+ vcpu->arch.timing_sum_duration[i] = 0;
+ vcpu->arch.timing_sum_quad_duration[i] = 0;
+ }
+ vcpu->arch.timing_last_exit = 0;
+ vcpu->arch.timing_exit.tv64 = 0;
+ vcpu->arch.timing_last_enter.tv64 = 0;
+
+ mutex_unlock(&vcpu->mutex);
+ local_irq_enable();
+}
+
+static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
+{
+ u64 old;
+
+ do_div(duration, tb_ticks_per_usec);
+ if (unlikely(duration > 0xFFFFFFFF)) {
+ printk(KERN_ERR"%s - duration too big -> overflow"
+ " duration %lld type %d exit #%d\n",
+ __func__, duration, type,
+ vcpu->arch.timing_count_type[type]);
+ return;
+ }
+
+ vcpu->arch.timing_count_type[type]++;
+
+ /* sum */
+ old = vcpu->arch.timing_sum_duration[type];
+ vcpu->arch.timing_sum_duration[type] += duration;
+ if (unlikely(old > vcpu->arch.timing_sum_duration[type])) {
+ printk(KERN_ERR"%s - wrap adding sum of durations"
+ " old %lld new %lld type %d exit # of type %d\n",
+ __func__, old, vcpu->arch.timing_sum_duration[type],
+ type, vcpu->arch.timing_count_type[type]);
+ }
+
+ /* square sum */
+ old = vcpu->arch.timing_sum_quad_duration[type];
+ vcpu->arch.timing_sum_quad_duration[type] += (duration*duration);
+ if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) {
+ printk(KERN_ERR"%s - wrap adding sum of squared durations"
+ " old %lld new %lld type %d exit # of type %d\n",
+ __func__, old,
+ vcpu->arch.timing_sum_quad_duration[type],
+ type, vcpu->arch.timing_count_type[type]);
+ }
+
+ /* set min/max */
+ if (unlikely(duration < vcpu->arch.timing_min_duration[type]))
+ vcpu->arch.timing_min_duration[type] = duration;
+ if (unlikely(duration > vcpu->arch.timing_max_duration[type]))
+ vcpu->arch.timing_max_duration[type] = duration;
+}
+
+void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
+{
+ u64 exit = vcpu->arch.timing_last_exit;
+ u64 enter = vcpu->arch.timing_last_enter.tv64;
+
+ /* save exit time, used next exit when the reenter time is known */
+ vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64;
+
+ if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0))
+ return; /* skip incomplete cycle (e.g. after reset) */
+
+ /* update statistics for average and standard deviation */
+ add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type);
+ /* enter -> timing_last_exit is time spent in guest - log this too */
+ add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter),
+ TIMEINGUEST);
+}
+
+static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = {
+ [MMIO_EXITS] = "MMIO",
+ [DCR_EXITS] = "DCR",
+ [SIGNAL_EXITS] = "SIGNAL",
+ [ITLB_REAL_MISS_EXITS] = "ITLBREAL",
+ [ITLB_VIRT_MISS_EXITS] = "ITLBVIRT",
+ [DTLB_REAL_MISS_EXITS] = "DTLBREAL",
+ [DTLB_VIRT_MISS_EXITS] = "DTLBVIRT",
+ [SYSCALL_EXITS] = "SYSCALL",
+ [ISI_EXITS] = "ISI",
+ [DSI_EXITS] = "DSI",
+ [EMULATED_INST_EXITS] = "EMULINST",
+ [EMULATED_MTMSRWE_EXITS] = "EMUL_WAIT",
+ [EMULATED_WRTEE_EXITS] = "EMUL_WRTEE",
+ [EMULATED_MTSPR_EXITS] = "EMUL_MTSPR",
+ [EMULATED_MFSPR_EXITS] = "EMUL_MFSPR",
+ [EMULATED_MTMSR_EXITS] = "EMUL_MTMSR",
+ [EMULATED_MFMSR_EXITS] = "EMUL_MFMSR",
+ [EMULATED_TLBSX_EXITS] = "EMUL_TLBSX",
+ [EMULATED_TLBWE_EXITS] = "EMUL_TLBWE",
+ [EMULATED_RFI_EXITS] = "EMUL_RFI",
+ [DEC_EXITS] = "DEC",
+ [EXT_INTR_EXITS] = "EXTINT",
+ [HALT_WAKEUP] = "HALT",
+ [USR_PR_INST] = "USR_PR_INST",
+ [FP_UNAVAIL] = "FP_UNAVAIL",
+ [DEBUG_EXITS] = "DEBUG",
+ [TIMEINGUEST] = "TIMEINGUEST"
+};
+
+static int kvmppc_exit_timing_show(struct seq_file *m, void *private)
+{
+ struct kvm_vcpu *vcpu = m->private;
+ int i;
+
+ seq_printf(m, "%s", "type count min max sum sum_squared\n");
+
+ for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
+ seq_printf(m, "%12s %10d %10lld %10lld %20lld %20lld\n",
+ kvm_exit_names[i],
+ vcpu->arch.timing_count_type[i],
+ vcpu->arch.timing_min_duration[i],
+ vcpu->arch.timing_max_duration[i],
+ vcpu->arch.timing_sum_duration[i],
+ vcpu->arch.timing_sum_quad_duration[i]);
+ }
+ return 0;
+}
+
+/* Write 'c' to clear the timing statistics. */
+static ssize_t kvmppc_exit_timing_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int err = -EINVAL;
+ char c;
+
+ if (count > 1) {
+ goto done;
+ }
+
+ if (get_user(c, user_buf)) {
+ err = -EFAULT;
+ goto done;
+ }
+
+ if (c == 'c') {
+ struct seq_file *seqf = (struct seq_file *)file->private_data;
+ struct kvm_vcpu *vcpu = seqf->private;
+ /* Write does not affect our buffers previously generated with
+ * show. seq_file is locked here to prevent races of init with
+ * a show call */
+ mutex_lock(&seqf->lock);
+ kvmppc_init_timing_stats(vcpu);
+ mutex_unlock(&seqf->lock);
+ err = count;
+ }
+
+done:
+ return err;
+}
+
+static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, kvmppc_exit_timing_show, inode->i_private);
+}
+
+static struct file_operations kvmppc_exit_timing_fops = {
+ .owner = THIS_MODULE,
+ .open = kvmppc_exit_timing_open,
+ .read = seq_read,
+ .write = kvmppc_exit_timing_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id)
+{
+ static char dbg_fname[50];
+ struct dentry *debugfs_file;
+
+ snprintf(dbg_fname, sizeof(dbg_fname), "vm%u_vcpu%u_timing",
+ current->pid, id);
+ debugfs_file = debugfs_create_file(dbg_fname, 0666,
+ kvm_debugfs_dir, vcpu,
+ &kvmppc_exit_timing_fops);
+
+ if (!debugfs_file) {
+ printk(KERN_ERR"%s: error creating debugfs file %s\n",
+ __func__, dbg_fname);
+ return;
+ }
+
+ vcpu->arch.debugfs_exit_timing = debugfs_file;
+}
+
+void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.debugfs_exit_timing) {
+ debugfs_remove(vcpu->arch.debugfs_exit_timing);
+ vcpu->arch.debugfs_exit_timing = NULL;
+ }
+}
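
Per exit type, timing.c records the count, min, max, sum, and sum of squared durations (in microseconds after the do_div by tb_ticks_per_usec), which is enough to recover the mean and a population standard deviation offline. A sketch of that post-processing on one exported row, with made-up numbers:

#include <math.h>
#include <stdio.h>
#include <stdint.h>

/* mean = sum / n;  variance = sum_sq / n - mean^2  (population form). */
static void print_row(const char *name, uint64_t n,
		      uint64_t sum, uint64_t sum_sq)
{
	double mean, var;

	if (!n)
		return;
	mean = (double)sum / n;
	var  = (double)sum_sq / n - mean * mean;
	printf("%-12s count=%llu mean=%.2fus stddev=%.2fus\n",
	       name, (unsigned long long)n, mean,
	       var > 0.0 ? sqrt(var) : 0.0);
}

int main(void)
{
	/* Made-up numbers standing in for one row of the timing file. */
	print_row("DTLBVIRT", 1000, 52000, 2810000);
	return 0;
}
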
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h
new file mode 100644
index 000000000000..bb13b1f3cd5a
--- /dev/null
+++ b/arch/powerpc/kvm/timing.h
@@ -0,0 +1,102 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_EXITTIMING_H__
+#define __POWERPC_KVM_EXITTIMING_H__
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_host.h>
+
+#ifdef CONFIG_KVM_EXIT_TIMING
+void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu);
+void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu);
+void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id);
+void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu);
+
+static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type)
+{
+ vcpu->arch.last_exit_type = type;
+}
+
+#else
+/* if exit timing is not configured there is no need to build the c file */
+static inline void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu,
+ unsigned int id) {}
+static inline void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
+static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) {}
+#endif /* CONFIG_KVM_EXIT_TIMING */
+
+/* account the exit in kvm_stats */
+static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type)
+{
+ /* type has to be known at build time for optimization */
+ BUILD_BUG_ON(__builtin_constant_p(type));
+ switch (type) {
+ case EXT_INTR_EXITS:
+ vcpu->stat.ext_intr_exits++;
+ break;
+ case DEC_EXITS:
+ vcpu->stat.dec_exits++;
+ break;
+ case EMULATED_INST_EXITS:
+ vcpu->stat.emulated_inst_exits++;
+ break;
+ case DCR_EXITS:
+ vcpu->stat.dcr_exits++;
+ break;
+ case DSI_EXITS:
+ vcpu->stat.dsi_exits++;
+ break;
+ case ISI_EXITS:
+ vcpu->stat.isi_exits++;
+ break;
+ case SYSCALL_EXITS:
+ vcpu->stat.syscall_exits++;
+ break;
+ case DTLB_REAL_MISS_EXITS:
+ vcpu->stat.dtlb_real_miss_exits++;
+ break;
+ case DTLB_VIRT_MISS_EXITS:
+ vcpu->stat.dtlb_virt_miss_exits++;
+ break;
+ case MMIO_EXITS:
+ vcpu->stat.mmio_exits++;
+ break;
+ case ITLB_REAL_MISS_EXITS:
+ vcpu->stat.itlb_real_miss_exits++;
+ break;
+ case ITLB_VIRT_MISS_EXITS:
+ vcpu->stat.itlb_virt_miss_exits++;
+ break;
+ case SIGNAL_EXITS:
+ vcpu->stat.signal_exits++;
+ break;
+ }
+}
+
+/* wrapper to set exit time and account for it in kvm_stats */
+static inline void kvmppc_account_exit(struct kvm_vcpu *vcpu, int type)
+{
+ kvmppc_set_exit_type(vcpu, type);
+ kvmppc_account_exit_stat(vcpu, type);
+}
+
+#endif /* __POWERPC_KVM_EXITTIMING_H__ */
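
kvmppc_account_exit_stat() is written as a single switch so that, when the exit type is a compile-time constant at each call site, the compiler can collapse it to one counter increment. A stand-alone sketch of the same pattern, with invented counters:

/* Stand-alone illustration; the struct and counters are invented. */
struct toy_stats { unsigned long dec_exits, ext_intr_exits; };

enum toy_exit_type { TOY_DEC, TOY_EXT_INTR };

static inline void toy_account(struct toy_stats *s, enum toy_exit_type type)
{
	/* With 'type' constant at the call site, only one arm survives. */
	switch (type) {
	case TOY_DEC:
		s->dec_exits++;
		break;
	case TOY_EXT_INTR:
		s->ext_intr_exits++;
		break;
	}
}

void on_decrementer_exit(struct toy_stats *s)
{
	toy_account(s, TOY_DEC);	/* folds to a single increment */
}
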
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index d69912c07ce7..8db35278a4b4 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -6,6 +6,9 @@ ifeq ($(CONFIG_PPC64),y)
EXTRA_CFLAGS += -mno-minimal-toc
endif
+CFLAGS_REMOVE_code-patching.o = -pg
+CFLAGS_REMOVE_feature-fixups.o = -pg
+
obj-y := string.o alloc.o \
checksum_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 25ec5378afa4..70693a5c12a1 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -26,11 +26,24 @@ _GLOBAL(__copy_tofrom_user)
andi. r6,r6,7
PPC_MTOCRF 0x01,r5
blt cr1,.Lshort_copy
+/* Below we want to nop out the bne if we're on a CPU that has the
+ * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
+ * cleared.
+ * At the time of writing the only CPU that has this combination of bits
+ * set is Power6.
+ */
+BEGIN_FTR_SECTION
+ nop
+FTR_SECTION_ELSE
bne .Ldst_unaligned
+ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
+ CPU_FTR_UNALIGNED_LD_STD)
.Ldst_aligned:
- andi. r0,r4,7
addi r3,r3,-16
+BEGIN_FTR_SECTION
+ andi. r0,r4,7
bne .Lsrc_unaligned
+END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
srdi r7,r5,4
20: ld r9,0(r4)
addi r4,r4,-8
@@ -138,7 +151,7 @@ _GLOBAL(__copy_tofrom_user)
PPC_MTOCRF 0x01,r6 /* put #bytes to 8B bdry into cr7 */
subf r5,r6,r5
li r7,0
- cmpldi r1,r5,16
+ cmpldi cr1,r5,16
bf cr7*4+3,1f
35: lbz r0,0(r4)
81: stb r0,0(r3)
diff --git a/arch/powerpc/lib/dma-noncoherent.c b/arch/powerpc/lib/dma-noncoherent.c
index 31734c0969cd..b7dc4c19f582 100644
--- a/arch/powerpc/lib/dma-noncoherent.c
+++ b/arch/powerpc/lib/dma-noncoherent.c
@@ -77,26 +77,26 @@ static DEFINE_SPINLOCK(consistent_lock);
* the amount of RAM found at boot time.) I would imagine that get_vm_area()
* would have to initialise this each time prior to calling vm_region_alloc().
*/
-struct vm_region {
+struct ppc_vm_region {
struct list_head vm_list;
unsigned long vm_start;
unsigned long vm_end;
};
-static struct vm_region consistent_head = {
+static struct ppc_vm_region consistent_head = {
.vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
.vm_start = CONSISTENT_BASE,
.vm_end = CONSISTENT_END,
};
-static struct vm_region *
-vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
+static struct ppc_vm_region *
+ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
unsigned long addr = head->vm_start, end = head->vm_end - size;
unsigned long flags;
- struct vm_region *c, *new;
+ struct ppc_vm_region *c, *new;
- new = kmalloc(sizeof(struct vm_region), gfp);
+ new = kmalloc(sizeof(struct ppc_vm_region), gfp);
if (!new)
goto out;
@@ -130,9 +130,9 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
return NULL;
}
-static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
+static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
- struct vm_region *c;
+ struct ppc_vm_region *c;
list_for_each_entry(c, &head->vm_list, vm_list) {
if (c->vm_start == addr)
@@ -151,7 +151,7 @@ void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
{
struct page *page;
- struct vm_region *c;
+ struct ppc_vm_region *c;
unsigned long order;
u64 mask = 0x00ffffff, limit; /* ISA default */
@@ -191,7 +191,7 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
/*
* Allocate a virtual address in the consistent mapping region.
*/
- c = vm_region_alloc(&consistent_head, size,
+ c = ppc_vm_region_alloc(&consistent_head, size,
gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
if (c) {
unsigned long vaddr = c->vm_start;
@@ -239,7 +239,7 @@ EXPORT_SYMBOL(__dma_alloc_coherent);
*/
void __dma_free_coherent(size_t size, void *vaddr)
{
- struct vm_region *c;
+ struct ppc_vm_region *c;
unsigned long flags, addr;
pte_t *ptep;
@@ -247,7 +247,7 @@ void __dma_free_coherent(size_t size, void *vaddr)
spin_lock_irqsave(&consistent_lock, flags);
- c = vm_region_find(&consistent_head, (unsigned long)vaddr);
+ c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
if (!c)
goto no_area;
@@ -320,7 +320,6 @@ static int __init dma_alloc_init(void)
ret = -ENOMEM;
break;
}
- WARN_ON(!pmd_none(*pmd));
pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
if (!pte) {
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index 3f131129d1c1..fe2d34e5332d 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -18,11 +18,23 @@ _GLOBAL(memcpy)
andi. r6,r6,7
dcbt 0,r4
blt cr1,.Lshort_copy
+/* Below we want to nop out the bne if we're on a CPU that has the
+ CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
+ cleared.
+ At the time of writing, the only CPU with this combination is Power6. */
+BEGIN_FTR_SECTION
+ nop
+FTR_SECTION_ELSE
bne .Ldst_unaligned
+ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
+ CPU_FTR_UNALIGNED_LD_STD)
.Ldst_aligned:
- andi. r0,r4,7
addi r3,r3,-16
+BEGIN_FTR_SECTION
+ andi. r0,r4,7
bne .Lsrc_unaligned
+END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
srdi r7,r5,4
ld r9,0(r4)
addi r4,r4,-8
@@ -131,7 +143,7 @@ _GLOBAL(memcpy)
PPC_MTOCRF 0x01,r6 # put #bytes to 8B bdry into cr7
subf r5,r6,r5
li r7,0
- cmpldi r1,r5,16
+ cmpldi cr1,r5,16
bf cr7*4+3,1f
lbz r0,0(r4)
stb r0,0(r3)
diff --git a/arch/powerpc/math-emu/Makefile b/arch/powerpc/math-emu/Makefile
index 03aa98dd9f0a..f9e506a735ae 100644
--- a/arch/powerpc/math-emu/Makefile
+++ b/arch/powerpc/math-emu/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_MATH_EMULATION) += fabs.o fadd.o fadds.o fcmpo.o fcmpu.o \
mcrfs.o mffs.o mtfsb0.o mtfsb1.o \
mtfsf.o mtfsfi.o stfiwx.o stfs.o
+obj-$(CONFIG_SPE) += math_efp.o
+
CFLAGS_fabs.o = -fno-builtin-fabs
CFLAGS_math.o = -fno-builtin-fabs
diff --git a/arch/powerpc/math-emu/fadd.c b/arch/powerpc/math-emu/fadd.c
index 04d3b4aa32ce..0158a16e2b82 100644
--- a/arch/powerpc/math-emu/fadd.c
+++ b/arch/powerpc/math-emu/fadd.c
@@ -13,7 +13,6 @@ fadd(void *frD, void *frA, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p\n", __func__, frD, frA, frB);
diff --git a/arch/powerpc/math-emu/fcmpo.c b/arch/powerpc/math-emu/fcmpo.c
index b5dc4498cd71..5bce011c2aec 100644
--- a/arch/powerpc/math-emu/fcmpo.c
+++ b/arch/powerpc/math-emu/fcmpo.c
@@ -14,7 +14,6 @@ fcmpo(u32 *ccr, int crfD, void *frA, void *frB)
FP_DECL_EX;
int code[4] = { (1 << 3), (1 << 1), (1 << 2), (1 << 0) };
long cmp;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p (%08x) %d %p %p\n", __func__, ccr, *ccr, crfD, frA, frB);
@@ -29,7 +28,7 @@ fcmpo(u32 *ccr, int crfD, void *frA, void *frB)
#endif
if (A_c == FP_CLS_NAN || B_c == FP_CLS_NAN)
- ret |= EFLAG_VXVC;
+ FP_SET_EXCEPTION(EFLAG_VXVC);
FP_CMP_D(cmp, A, B, 2);
cmp = code[(cmp + 1) & 3];
@@ -44,5 +43,5 @@ fcmpo(u32 *ccr, int crfD, void *frA, void *frB)
printk("CR: %08x\n", *ccr);
#endif
- return ret;
+ return FP_CUR_EXCEPTIONS;
}
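All of the math-emu conversions in this series follow the pattern shown here for fcmpo(): the per-function "int ret" accumulator is dropped in favour of the exception word that FP_DECL_EX already declares. Treat the following as an approximation of the soft-fp plumbing (the real definitions live in the math-emu headers and arch/powerpc/include/asm/sfp-machine.h), shown only to make the transformation readable:

    /* Approximate shape of the soft-fp exception helpers used above. */
    #define FP_DECL_EX              int _fex = 0    /* accumulated exception flags   */
    #define FP_SET_EXCEPTION(ex)    (_fex |= (ex))  /* replaces the old "ret |= ..." */
    #define FP_CUR_EXCEPTIONS       (_fex)          /* what the function now returns */

The point of the change is that the FP_CMP_D/FP_ADD_D/... operation macros raise their own bits (invalid, overflow, inexact, ...) into that same word, so returning FP_CUR_EXCEPTIONS reports both the explicitly raised EFLAG_* bits and anything the arithmetic macros set — the old local ret silently dropped the latter.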
diff --git a/arch/powerpc/math-emu/fdiv.c b/arch/powerpc/math-emu/fdiv.c
index 2db15097d98e..a29239c05e3e 100644
--- a/arch/powerpc/math-emu/fdiv.c
+++ b/arch/powerpc/math-emu/fdiv.c
@@ -13,7 +13,6 @@ fdiv(void *frD, void *frA, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -28,22 +27,22 @@ fdiv(void *frD, void *frA, void *frB)
#endif
if (A_c == FP_CLS_ZERO && B_c == FP_CLS_ZERO) {
- ret |= EFLAG_VXZDZ;
+ FP_SET_EXCEPTION(EFLAG_VXZDZ);
#ifdef DEBUG
printk("%s: FPSCR_VXZDZ raised\n", __func__);
#endif
}
if (A_c == FP_CLS_INF && B_c == FP_CLS_INF) {
- ret |= EFLAG_VXIDI;
+ FP_SET_EXCEPTION(EFLAG_VXIDI);
#ifdef DEBUG
printk("%s: FPSCR_VXIDI raised\n", __func__);
#endif
}
if (B_c == FP_CLS_ZERO && A_c != FP_CLS_ZERO) {
- ret |= EFLAG_DIVZERO;
+ FP_SET_EXCEPTION(EFLAG_DIVZERO);
if (__FPU_TRAP_P(EFLAG_DIVZERO))
- return ret;
+ return FP_CUR_EXCEPTIONS;
}
FP_DIV_D(R, A, B);
diff --git a/arch/powerpc/math-emu/fdivs.c b/arch/powerpc/math-emu/fdivs.c
index 797f6a9a20b5..526bc261275f 100644
--- a/arch/powerpc/math-emu/fdivs.c
+++ b/arch/powerpc/math-emu/fdivs.c
@@ -14,7 +14,6 @@ fdivs(void *frD, void *frA, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -29,22 +28,22 @@ fdivs(void *frD, void *frA, void *frB)
#endif
if (A_c == FP_CLS_ZERO && B_c == FP_CLS_ZERO) {
- ret |= EFLAG_VXZDZ;
+ FP_SET_EXCEPTION(EFLAG_VXZDZ);
#ifdef DEBUG
printk("%s: FPSCR_VXZDZ raised\n", __func__);
#endif
}
if (A_c == FP_CLS_INF && B_c == FP_CLS_INF) {
- ret |= EFLAG_VXIDI;
+ FP_SET_EXCEPTION(EFLAG_VXIDI);
#ifdef DEBUG
printk("%s: FPSCR_VXIDI raised\n", __func__);
#endif
}
if (B_c == FP_CLS_ZERO && A_c != FP_CLS_ZERO) {
- ret |= EFLAG_DIVZERO;
+ FP_SET_EXCEPTION(EFLAG_DIVZERO);
if (__FPU_TRAP_P(EFLAG_DIVZERO))
- return ret;
+ return FP_CUR_EXCEPTIONS;
}
FP_DIV_D(R, A, B);
diff --git a/arch/powerpc/math-emu/fmadd.c b/arch/powerpc/math-emu/fmadd.c
index 925313aa6f82..8c3f20aa5a95 100644
--- a/arch/powerpc/math-emu/fmadd.c
+++ b/arch/powerpc/math-emu/fmadd.c
@@ -15,7 +15,6 @@ fmadd(void *frD, void *frA, void *frB, void *frC)
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -33,12 +32,12 @@ fmadd(void *frD, void *frA, void *frB, void *frC)
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
diff --git a/arch/powerpc/math-emu/fmadds.c b/arch/powerpc/math-emu/fmadds.c
index aea80ef79399..794fb31e59d1 100644
--- a/arch/powerpc/math-emu/fmadds.c
+++ b/arch/powerpc/math-emu/fmadds.c
@@ -16,7 +16,6 @@ fmadds(void *frD, void *frA, void *frB, void *frC)
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -34,12 +33,12 @@ fmadds(void *frD, void *frA, void *frB, void *frC)
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
diff --git a/arch/powerpc/math-emu/fmsub.c b/arch/powerpc/math-emu/fmsub.c
index a644d525fca6..626f6fed84ac 100644
--- a/arch/powerpc/math-emu/fmsub.c
+++ b/arch/powerpc/math-emu/fmsub.c
@@ -15,7 +15,6 @@ fmsub(void *frD, void *frA, void *frB, void *frC)
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -33,7 +32,7 @@ fmsub(void *frD, void *frA, void *frB, void *frC)
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
@@ -41,7 +40,7 @@ fmsub(void *frD, void *frA, void *frB, void *frC)
B_s ^= 1;
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
diff --git a/arch/powerpc/math-emu/fmsubs.c b/arch/powerpc/math-emu/fmsubs.c
index 2fdeeb9bb569..3425bc899760 100644
--- a/arch/powerpc/math-emu/fmsubs.c
+++ b/arch/powerpc/math-emu/fmsubs.c
@@ -16,7 +16,6 @@ fmsubs(void *frD, void *frA, void *frB, void *frC)
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -34,7 +33,7 @@ fmsubs(void *frD, void *frA, void *frB, void *frC)
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
@@ -42,7 +41,7 @@ fmsubs(void *frD, void *frA, void *frB, void *frC)
B_s ^= 1;
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
diff --git a/arch/powerpc/math-emu/fmul.c b/arch/powerpc/math-emu/fmul.c
index 391fd17d3440..2c1929779892 100644
--- a/arch/powerpc/math-emu/fmul.c
+++ b/arch/powerpc/math-emu/fmul.c
@@ -13,7 +13,6 @@ fmul(void *frD, void *frA, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -31,7 +30,7 @@ fmul(void *frD, void *frA, void *frB)
if ((A_c == FP_CLS_INF && B_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && B_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(R, A, B);
diff --git a/arch/powerpc/math-emu/fmuls.c b/arch/powerpc/math-emu/fmuls.c
index 2d3ec5f7da20..f5ad5c9c77d0 100644
--- a/arch/powerpc/math-emu/fmuls.c
+++ b/arch/powerpc/math-emu/fmuls.c
@@ -14,7 +14,6 @@ fmuls(void *frD, void *frA, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -32,7 +31,7 @@ fmuls(void *frD, void *frA, void *frB)
if ((A_c == FP_CLS_INF && B_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && B_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(R, A, B);
diff --git a/arch/powerpc/math-emu/fnmadd.c b/arch/powerpc/math-emu/fnmadd.c
index 2497b86494e5..e817bc5453ef 100644
--- a/arch/powerpc/math-emu/fnmadd.c
+++ b/arch/powerpc/math-emu/fnmadd.c
@@ -15,7 +15,6 @@ fnmadd(void *frD, void *frA, void *frB, void *frC)
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -33,12 +32,12 @@ fnmadd(void *frD, void *frA, void *frB, void *frC)
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
diff --git a/arch/powerpc/math-emu/fnmadds.c b/arch/powerpc/math-emu/fnmadds.c
index ee9d71e0b376..4db4b7d9ba8d 100644
--- a/arch/powerpc/math-emu/fnmadds.c
+++ b/arch/powerpc/math-emu/fnmadds.c
@@ -16,7 +16,6 @@ fnmadds(void *frD, void *frA, void *frB, void *frC)
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -34,12 +33,12 @@ fnmadds(void *frD, void *frA, void *frB, void *frC)
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
diff --git a/arch/powerpc/math-emu/fnmsub.c b/arch/powerpc/math-emu/fnmsub.c
index 3885a77acc93..f65979fa770e 100644
--- a/arch/powerpc/math-emu/fnmsub.c
+++ b/arch/powerpc/math-emu/fnmsub.c
@@ -15,7 +15,6 @@ fnmsub(void *frD, void *frA, void *frB, void *frC)
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -33,7 +32,7 @@ fnmsub(void *frD, void *frA, void *frB, void *frC)
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
@@ -41,7 +40,7 @@ fnmsub(void *frD, void *frA, void *frB, void *frC)
B_s ^= 1;
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
diff --git a/arch/powerpc/math-emu/fnmsubs.c b/arch/powerpc/math-emu/fnmsubs.c
index f835dfeb0fd1..9021dacc03b8 100644
--- a/arch/powerpc/math-emu/fnmsubs.c
+++ b/arch/powerpc/math-emu/fnmsubs.c
@@ -16,7 +16,6 @@ fnmsubs(void *frD, void *frA, void *frB, void *frC)
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -34,7 +33,7 @@ fnmsubs(void *frD, void *frA, void *frB, void *frC)
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
@@ -42,7 +41,7 @@ fnmsubs(void *frD, void *frA, void *frB, void *frC)
B_s ^= 1;
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
diff --git a/arch/powerpc/math-emu/fsqrt.c b/arch/powerpc/math-emu/fsqrt.c
index 3e90072693a0..a55fc7d49983 100644
--- a/arch/powerpc/math-emu/fsqrt.c
+++ b/arch/powerpc/math-emu/fsqrt.c
@@ -12,7 +12,6 @@ fsqrt(void *frD, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frB);
@@ -25,9 +24,9 @@ fsqrt(void *frD, void *frB)
#endif
if (B_s && B_c != FP_CLS_ZERO)
- ret |= EFLAG_VXSQRT;
+ FP_SET_EXCEPTION(EFLAG_VXSQRT);
if (B_c == FP_CLS_NAN)
- ret |= EFLAG_VXSNAN;
+ FP_SET_EXCEPTION(EFLAG_VXSNAN);
FP_SQRT_D(R, B);
diff --git a/arch/powerpc/math-emu/fsqrts.c b/arch/powerpc/math-emu/fsqrts.c
index 2843be986e2e..31dccbfc39ff 100644
--- a/arch/powerpc/math-emu/fsqrts.c
+++ b/arch/powerpc/math-emu/fsqrts.c
@@ -13,7 +13,6 @@ fsqrts(void *frD, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frB);
@@ -26,9 +25,9 @@ fsqrts(void *frD, void *frB)
#endif
if (B_s && B_c != FP_CLS_ZERO)
- ret |= EFLAG_VXSQRT;
+ FP_SET_EXCEPTION(EFLAG_VXSQRT);
if (B_c == FP_CLS_NAN)
- ret |= EFLAG_VXSNAN;
+ FP_SET_EXCEPTION(EFLAG_VXSNAN);
FP_SQRT_D(R, B);
diff --git a/arch/powerpc/math-emu/fsub.c b/arch/powerpc/math-emu/fsub.c
index 78b09446a0e1..02c5dff458ba 100644
--- a/arch/powerpc/math-emu/fsub.c
+++ b/arch/powerpc/math-emu/fsub.c
@@ -13,7 +13,6 @@ fsub(void *frD, void *frA, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -31,7 +30,7 @@ fsub(void *frD, void *frA, void *frB)
B_s ^= 1;
if (A_s != B_s && A_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, A, B);
diff --git a/arch/powerpc/math-emu/fsubs.c b/arch/powerpc/math-emu/fsubs.c
index d3bf90863cf2..5d9b18c35e07 100644
--- a/arch/powerpc/math-emu/fsubs.c
+++ b/arch/powerpc/math-emu/fsubs.c
@@ -14,7 +14,6 @@ fsubs(void *frD, void *frA, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -32,7 +31,7 @@ fsubs(void *frD, void *frA, void *frB)
B_s ^= 1;
if (A_s != B_s && A_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, A, B);
diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c
new file mode 100644
index 000000000000..41f4ef30e480
--- /dev/null
+++ b/arch/powerpc/math-emu/math_efp.c
@@ -0,0 +1,720 @@
+/*
+ * arch/powerpc/math-emu/math_efp.c
+ *
+ * Copyright (C) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Ebony Zhu, <ebony.zhu@freescale.com>
+ * Yu Liu, <yu.liu@freescale.com>
+ *
+ * Derived from arch/alpha/math-emu/math.c
+ * arch/powerpc/math-emu/math.c
+ *
+ * Description:
+ * This file is the exception handler to make E500 SPE instructions
+ * fully comply with the IEEE-754 floating-point standard.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+
+#include <asm/uaccess.h>
+#include <asm/reg.h>
+
+#define FP_EX_BOOKE_E500_SPE
+#include <asm/sfp-machine.h>
+
+#include <math-emu/soft-fp.h>
+#include <math-emu/single.h>
+#include <math-emu/double.h>
+
+#define EFAPU 0x4
+
+#define VCT 0x4
+#define SPFP 0x6
+#define DPFP 0x7
+
+#define EFSADD 0x2c0
+#define EFSSUB 0x2c1
+#define EFSABS 0x2c4
+#define EFSNABS 0x2c5
+#define EFSNEG 0x2c6
+#define EFSMUL 0x2c8
+#define EFSDIV 0x2c9
+#define EFSCMPGT 0x2cc
+#define EFSCMPLT 0x2cd
+#define EFSCMPEQ 0x2ce
+#define EFSCFD 0x2cf
+#define EFSCFSI 0x2d1
+#define EFSCTUI 0x2d4
+#define EFSCTSI 0x2d5
+#define EFSCTUF 0x2d6
+#define EFSCTSF 0x2d7
+#define EFSCTUIZ 0x2d8
+#define EFSCTSIZ 0x2da
+
+#define EVFSADD 0x280
+#define EVFSSUB 0x281
+#define EVFSABS 0x284
+#define EVFSNABS 0x285
+#define EVFSNEG 0x286
+#define EVFSMUL 0x288
+#define EVFSDIV 0x289
+#define EVFSCMPGT 0x28c
+#define EVFSCMPLT 0x28d
+#define EVFSCMPEQ 0x28e
+#define EVFSCTUI 0x294
+#define EVFSCTSI 0x295
+#define EVFSCTUF 0x296
+#define EVFSCTSF 0x297
+#define EVFSCTUIZ 0x298
+#define EVFSCTSIZ 0x29a
+
+#define EFDADD 0x2e0
+#define EFDSUB 0x2e1
+#define EFDABS 0x2e4
+#define EFDNABS 0x2e5
+#define EFDNEG 0x2e6
+#define EFDMUL 0x2e8
+#define EFDDIV 0x2e9
+#define EFDCTUIDZ 0x2ea
+#define EFDCTSIDZ 0x2eb
+#define EFDCMPGT 0x2ec
+#define EFDCMPLT 0x2ed
+#define EFDCMPEQ 0x2ee
+#define EFDCFS 0x2ef
+#define EFDCTUI 0x2f4
+#define EFDCTSI 0x2f5
+#define EFDCTUF 0x2f6
+#define EFDCTSF 0x2f7
+#define EFDCTUIZ 0x2f8
+#define EFDCTSIZ 0x2fa
+
+#define AB 2
+#define XA 3
+#define XB 4
+#define XCR 5
+#define NOTYPE 0
+
+#define SIGN_BIT_S (1UL << 31)
+#define SIGN_BIT_D (1ULL << 63)
+#define FP_EX_MASK (FP_EX_INEXACT | FP_EX_INVALID | FP_EX_DIVZERO | \
+ FP_EX_UNDERFLOW | FP_EX_OVERFLOW)
+
+union dw_union {
+ u64 dp[1];
+ u32 wp[2];
+};
+
+static unsigned long insn_type(unsigned long speinsn)
+{
+ unsigned long ret = NOTYPE;
+
+ switch (speinsn & 0x7ff) {
+ case EFSABS: ret = XA; break;
+ case EFSADD: ret = AB; break;
+ case EFSCFD: ret = XB; break;
+ case EFSCMPEQ: ret = XCR; break;
+ case EFSCMPGT: ret = XCR; break;
+ case EFSCMPLT: ret = XCR; break;
+ case EFSCTSF: ret = XB; break;
+ case EFSCTSI: ret = XB; break;
+ case EFSCTSIZ: ret = XB; break;
+ case EFSCTUF: ret = XB; break;
+ case EFSCTUI: ret = XB; break;
+ case EFSCTUIZ: ret = XB; break;
+ case EFSDIV: ret = AB; break;
+ case EFSMUL: ret = AB; break;
+ case EFSNABS: ret = XA; break;
+ case EFSNEG: ret = XA; break;
+ case EFSSUB: ret = AB; break;
+ case EFSCFSI: ret = XB; break;
+
+ case EVFSABS: ret = XA; break;
+ case EVFSADD: ret = AB; break;
+ case EVFSCMPEQ: ret = XCR; break;
+ case EVFSCMPGT: ret = XCR; break;
+ case EVFSCMPLT: ret = XCR; break;
+ case EVFSCTSF: ret = XB; break;
+ case EVFSCTSI: ret = XB; break;
+ case EVFSCTSIZ: ret = XB; break;
+ case EVFSCTUF: ret = XB; break;
+ case EVFSCTUI: ret = XB; break;
+ case EVFSCTUIZ: ret = XB; break;
+ case EVFSDIV: ret = AB; break;
+ case EVFSMUL: ret = AB; break;
+ case EVFSNABS: ret = XA; break;
+ case EVFSNEG: ret = XA; break;
+ case EVFSSUB: ret = AB; break;
+
+ case EFDABS: ret = XA; break;
+ case EFDADD: ret = AB; break;
+ case EFDCFS: ret = XB; break;
+ case EFDCMPEQ: ret = XCR; break;
+ case EFDCMPGT: ret = XCR; break;
+ case EFDCMPLT: ret = XCR; break;
+ case EFDCTSF: ret = XB; break;
+ case EFDCTSI: ret = XB; break;
+ case EFDCTSIDZ: ret = XB; break;
+ case EFDCTSIZ: ret = XB; break;
+ case EFDCTUF: ret = XB; break;
+ case EFDCTUI: ret = XB; break;
+ case EFDCTUIDZ: ret = XB; break;
+ case EFDCTUIZ: ret = XB; break;
+ case EFDDIV: ret = AB; break;
+ case EFDMUL: ret = AB; break;
+ case EFDNABS: ret = XA; break;
+ case EFDNEG: ret = XA; break;
+ case EFDSUB: ret = AB; break;
+
+ default:
+ printk(KERN_ERR "\nOoops! SPE instruction no type found.");
+ printk(KERN_ERR "\ninst code: %08lx\n", speinsn);
+ }
+
+ return ret;
+}
+
+int do_spe_mathemu(struct pt_regs *regs)
+{
+ FP_DECL_EX;
+ int IR, cmp;
+
+ unsigned long type, func, fc, fa, fb, src, speinsn;
+ union dw_union vc, va, vb;
+
+ if (get_user(speinsn, (unsigned int __user *) regs->nip))
+ return -EFAULT;
+ if ((speinsn >> 26) != EFAPU)
+ return -EINVAL; /* not an spe instruction */
+
+ type = insn_type(speinsn);
+ if (type == NOTYPE)
+ return -ENOSYS;
+
+ func = speinsn & 0x7ff;
+ fc = (speinsn >> 21) & 0x1f;
+ fa = (speinsn >> 16) & 0x1f;
+ fb = (speinsn >> 11) & 0x1f;
+ src = (speinsn >> 5) & 0x7;
+
+ vc.wp[0] = current->thread.evr[fc];
+ vc.wp[1] = regs->gpr[fc];
+ va.wp[0] = current->thread.evr[fa];
+ va.wp[1] = regs->gpr[fa];
+ vb.wp[0] = current->thread.evr[fb];
+ vb.wp[1] = regs->gpr[fb];
+
+ __FPU_FPSCR = mfspr(SPRN_SPEFSCR);
+
+#ifdef DEBUG
+ printk("speinsn:%08lx spefscr:%08lx\n", speinsn, __FPU_FPSCR);
+ printk("vc: %08x %08x\n", vc.wp[0], vc.wp[1]);
+ printk("va: %08x %08x\n", va.wp[0], va.wp[1]);
+ printk("vb: %08x %08x\n", vb.wp[0], vb.wp[1]);
+#endif
+
+ switch (src) {
+ case SPFP: {
+ FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
+
+ switch (type) {
+ case AB:
+ case XCR:
+ FP_UNPACK_SP(SA, va.wp + 1);
+ case XB:
+ FP_UNPACK_SP(SB, vb.wp + 1);
+ break;
+ case XA:
+ FP_UNPACK_SP(SA, va.wp + 1);
+ break;
+ }
+
+#ifdef DEBUG
+ printk("SA: %ld %08lx %ld (%ld)\n", SA_s, SA_f, SA_e, SA_c);
+ printk("SB: %ld %08lx %ld (%ld)\n", SB_s, SB_f, SB_e, SB_c);
+#endif
+
+ switch (func) {
+ case EFSABS:
+ vc.wp[1] = va.wp[1] & ~SIGN_BIT_S;
+ goto update_regs;
+
+ case EFSNABS:
+ vc.wp[1] = va.wp[1] | SIGN_BIT_S;
+ goto update_regs;
+
+ case EFSNEG:
+ vc.wp[1] = va.wp[1] ^ SIGN_BIT_S;
+ goto update_regs;
+
+ case EFSADD:
+ FP_ADD_S(SR, SA, SB);
+ goto pack_s;
+
+ case EFSSUB:
+ FP_SUB_S(SR, SA, SB);
+ goto pack_s;
+
+ case EFSMUL:
+ FP_MUL_S(SR, SA, SB);
+ goto pack_s;
+
+ case EFSDIV:
+ FP_DIV_S(SR, SA, SB);
+ goto pack_s;
+
+ case EFSCMPEQ:
+ cmp = 0;
+ goto cmp_s;
+
+ case EFSCMPGT:
+ cmp = 1;
+ goto cmp_s;
+
+ case EFSCMPLT:
+ cmp = -1;
+ goto cmp_s;
+
+ case EFSCTSF:
+ case EFSCTUF:
+ if (!((vb.wp[1] >> 23) == 0xff && ((vb.wp[1] & 0x7fffff) > 0))) {
+ /* not a NaN */
+ if (((vb.wp[1] >> 23) & 0xff) == 0) {
+ /* denorm */
+ vc.wp[1] = 0x0;
+ } else if ((vb.wp[1] >> 31) == 0) {
+ /* positive normal */
+ vc.wp[1] = (func == EFSCTSF) ?
+ 0x7fffffff : 0xffffffff;
+ } else { /* negative normal */
+ vc.wp[1] = (func == EFSCTSF) ?
+ 0x80000000 : 0x0;
+ }
+ } else { /* rB is NaN */
+ vc.wp[1] = 0x0;
+ }
+ goto update_regs;
+
+ case EFSCFD: {
+ FP_DECL_D(DB);
+ FP_CLEAR_EXCEPTIONS;
+ FP_UNPACK_DP(DB, vb.dp);
+#ifdef DEBUG
+ printk("DB: %ld %08lx %08lx %ld (%ld)\n",
+ DB_s, DB_f1, DB_f0, DB_e, DB_c);
+#endif
+ FP_CONV(S, D, 1, 2, SR, DB);
+ goto pack_s;
+ }
+
+ case EFSCTSI:
+ case EFSCTSIZ:
+ case EFSCTUI:
+ case EFSCTUIZ:
+ if (func & 0x4) {
+ _FP_ROUND(1, SB);
+ } else {
+ _FP_ROUND_ZERO(1, SB);
+ }
+ FP_TO_INT_S(vc.wp[1], SB, 32, ((func & 0x3) != 0));
+ goto update_regs;
+
+ default:
+ goto illegal;
+ }
+ break;
+
+pack_s:
+#ifdef DEBUG
+ printk("SR: %ld %08lx %ld (%ld)\n", SR_s, SR_f, SR_e, SR_c);
+#endif
+ FP_PACK_SP(vc.wp + 1, SR);
+ goto update_regs;
+
+cmp_s:
+ FP_CMP_S(IR, SA, SB, 3);
+ if (IR == 3 && (FP_ISSIGNAN_S(SA) || FP_ISSIGNAN_S(SB)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ if (IR == cmp) {
+ IR = 0x4;
+ } else {
+ IR = 0;
+ }
+ goto update_ccr;
+ }
+
+ case DPFP: {
+ FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
+
+ switch (type) {
+ case AB:
+ case XCR:
+ FP_UNPACK_DP(DA, va.dp);
+ case XB:
+ FP_UNPACK_DP(DB, vb.dp);
+ break;
+ case XA:
+ FP_UNPACK_DP(DA, va.dp);
+ break;
+ }
+
+#ifdef DEBUG
+ printk("DA: %ld %08lx %08lx %ld (%ld)\n",
+ DA_s, DA_f1, DA_f0, DA_e, DA_c);
+ printk("DB: %ld %08lx %08lx %ld (%ld)\n",
+ DB_s, DB_f1, DB_f0, DB_e, DB_c);
+#endif
+
+ switch (func) {
+ case EFDABS:
+ vc.dp[0] = va.dp[0] & ~SIGN_BIT_D;
+ goto update_regs;
+
+ case EFDNABS:
+ vc.dp[0] = va.dp[0] | SIGN_BIT_D;
+ goto update_regs;
+
+ case EFDNEG:
+ vc.dp[0] = va.dp[0] ^ SIGN_BIT_D;
+ goto update_regs;
+
+ case EFDADD:
+ FP_ADD_D(DR, DA, DB);
+ goto pack_d;
+
+ case EFDSUB:
+ FP_SUB_D(DR, DA, DB);
+ goto pack_d;
+
+ case EFDMUL:
+ FP_MUL_D(DR, DA, DB);
+ goto pack_d;
+
+ case EFDDIV:
+ FP_DIV_D(DR, DA, DB);
+ goto pack_d;
+
+ case EFDCMPEQ:
+ cmp = 0;
+ goto cmp_d;
+
+ case EFDCMPGT:
+ cmp = 1;
+ goto cmp_d;
+
+ case EFDCMPLT:
+ cmp = -1;
+ goto cmp_d;
+
+ case EFDCTSF:
+ case EFDCTUF:
+ if (!((vb.wp[0] >> 20) == 0x7ff &&
+ ((vb.wp[0] & 0xfffff) > 0 || (vb.wp[1] > 0)))) {
+ /* not a NaN */
+ if (((vb.wp[0] >> 20) & 0x7ff) == 0) {
+ /* denorm */
+ vc.wp[1] = 0x0;
+ } else if ((vb.wp[0] >> 31) == 0) {
+ /* positive normal */
+ vc.wp[1] = (func == EFDCTSF) ?
+ 0x7fffffff : 0xffffffff;
+ } else { /* negative normal */
+ vc.wp[1] = (func == EFDCTSF) ?
+ 0x80000000 : 0x0;
+ }
+ } else { /* NaN */
+ vc.wp[1] = 0x0;
+ }
+ goto update_regs;
+
+ case EFDCFS: {
+ FP_DECL_S(SB);
+ FP_CLEAR_EXCEPTIONS;
+ FP_UNPACK_SP(SB, vb.wp + 1);
+#ifdef DEBUG
+ printk("SB: %ld %08lx %ld (%ld)\n",
+ SB_s, SB_f, SB_e, SB_c);
+#endif
+ FP_CONV(D, S, 2, 1, DR, SB);
+ goto pack_d;
+ }
+
+ case EFDCTUIDZ:
+ case EFDCTSIDZ:
+ _FP_ROUND_ZERO(2, DB);
+ FP_TO_INT_D(vc.dp[0], DB, 64, ((func & 0x1) == 0));
+ goto update_regs;
+
+ case EFDCTUI:
+ case EFDCTSI:
+ case EFDCTUIZ:
+ case EFDCTSIZ:
+ if (func & 0x4) {
+ _FP_ROUND(2, DB);
+ } else {
+ _FP_ROUND_ZERO(2, DB);
+ }
+ FP_TO_INT_D(vc.wp[1], DB, 32, ((func & 0x3) != 0));
+ goto update_regs;
+
+ default:
+ goto illegal;
+ }
+ break;
+
+pack_d:
+#ifdef DEBUG
+ printk("DR: %ld %08lx %08lx %ld (%ld)\n",
+ DR_s, DR_f1, DR_f0, DR_e, DR_c);
+#endif
+ FP_PACK_DP(vc.dp, DR);
+ goto update_regs;
+
+cmp_d:
+ FP_CMP_D(IR, DA, DB, 3);
+ if (IR == 3 && (FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ if (IR == cmp) {
+ IR = 0x4;
+ } else {
+ IR = 0;
+ }
+ goto update_ccr;
+
+ }
+
+ case VCT: {
+ FP_DECL_S(SA0); FP_DECL_S(SB0); FP_DECL_S(SR0);
+ FP_DECL_S(SA1); FP_DECL_S(SB1); FP_DECL_S(SR1);
+ int IR0, IR1;
+
+ switch (type) {
+ case AB:
+ case XCR:
+ FP_UNPACK_SP(SA0, va.wp);
+ FP_UNPACK_SP(SA1, va.wp + 1);
+ case XB:
+ FP_UNPACK_SP(SB0, vb.wp);
+ FP_UNPACK_SP(SB1, vb.wp + 1);
+ break;
+ case XA:
+ FP_UNPACK_SP(SA0, va.wp);
+ FP_UNPACK_SP(SA1, va.wp + 1);
+ break;
+ }
+
+#ifdef DEBUG
+ printk("SA0: %ld %08lx %ld (%ld)\n", SA0_s, SA0_f, SA0_e, SA0_c);
+ printk("SA1: %ld %08lx %ld (%ld)\n", SA1_s, SA1_f, SA1_e, SA1_c);
+ printk("SB0: %ld %08lx %ld (%ld)\n", SB0_s, SB0_f, SB0_e, SB0_c);
+ printk("SB1: %ld %08lx %ld (%ld)\n", SB1_s, SB1_f, SB1_e, SB1_c);
+#endif
+
+ switch (func) {
+ case EVFSABS:
+ vc.wp[0] = va.wp[0] & ~SIGN_BIT_S;
+ vc.wp[1] = va.wp[1] & ~SIGN_BIT_S;
+ goto update_regs;
+
+ case EVFSNABS:
+ vc.wp[0] = va.wp[0] | SIGN_BIT_S;
+ vc.wp[1] = va.wp[1] | SIGN_BIT_S;
+ goto update_regs;
+
+ case EVFSNEG:
+ vc.wp[0] = va.wp[0] ^ SIGN_BIT_S;
+ vc.wp[1] = va.wp[1] ^ SIGN_BIT_S;
+ goto update_regs;
+
+ case EVFSADD:
+ FP_ADD_S(SR0, SA0, SB0);
+ FP_ADD_S(SR1, SA1, SB1);
+ goto pack_vs;
+
+ case EVFSSUB:
+ FP_SUB_S(SR0, SA0, SB0);
+ FP_SUB_S(SR1, SA1, SB1);
+ goto pack_vs;
+
+ case EVFSMUL:
+ FP_MUL_S(SR0, SA0, SB0);
+ FP_MUL_S(SR1, SA1, SB1);
+ goto pack_vs;
+
+ case EVFSDIV:
+ FP_DIV_S(SR0, SA0, SB0);
+ FP_DIV_S(SR1, SA1, SB1);
+ goto pack_vs;
+
+ case EVFSCMPEQ:
+ cmp = 0;
+ goto cmp_vs;
+
+ case EVFSCMPGT:
+ cmp = 1;
+ goto cmp_vs;
+
+ case EVFSCMPLT:
+ cmp = -1;
+ goto cmp_vs;
+
+ case EVFSCTSF:
+ __asm__ __volatile__ ("mtspr 512, %4\n"
+ "efsctsf %0, %2\n"
+ "efsctsf %1, %3\n"
+ : "=r" (vc.wp[0]), "=r" (vc.wp[1])
+ : "r" (vb.wp[0]), "r" (vb.wp[1]), "r" (0));
+ goto update_regs;
+
+ case EVFSCTUF:
+ __asm__ __volatile__ ("mtspr 512, %4\n"
+ "efsctuf %0, %2\n"
+ "efsctuf %1, %3\n"
+ : "=r" (vc.wp[0]), "=r" (vc.wp[1])
+ : "r" (vb.wp[0]), "r" (vb.wp[1]), "r" (0));
+ goto update_regs;
+
+ case EVFSCTUI:
+ case EVFSCTSI:
+ case EVFSCTUIZ:
+ case EVFSCTSIZ:
+ if (func & 0x4) {
+ _FP_ROUND(1, SB0);
+ _FP_ROUND(1, SB1);
+ } else {
+ _FP_ROUND_ZERO(1, SB0);
+ _FP_ROUND_ZERO(1, SB1);
+ }
+ FP_TO_INT_S(vc.wp[0], SB0, 32, ((func & 0x3) != 0));
+ FP_TO_INT_S(vc.wp[1], SB1, 32, ((func & 0x3) != 0));
+ goto update_regs;
+
+ default:
+ goto illegal;
+ }
+ break;
+
+pack_vs:
+#ifdef DEBUG
+ printk("SR0: %ld %08lx %ld (%ld)\n", SR0_s, SR0_f, SR0_e, SR0_c);
+ printk("SR1: %ld %08lx %ld (%ld)\n", SR1_s, SR1_f, SR1_e, SR1_c);
+#endif
+ FP_PACK_SP(vc.wp, SR0);
+ FP_PACK_SP(vc.wp + 1, SR1);
+ goto update_regs;
+
+cmp_vs:
+ {
+ int ch, cl;
+
+ FP_CMP_S(IR0, SA0, SB0, 3);
+ FP_CMP_S(IR1, SA1, SB1, 3);
+ if (IR0 == 3 && (FP_ISSIGNAN_S(SA0) || FP_ISSIGNAN_S(SB0)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ if (IR1 == 3 && (FP_ISSIGNAN_S(SA1) || FP_ISSIGNAN_S(SB1)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ ch = (IR0 == cmp) ? 1 : 0;
+ cl = (IR1 == cmp) ? 1 : 0;
+ IR = (ch << 3) | (cl << 2) | ((ch | cl) << 1) |
+ ((ch & cl) << 0);
+ goto update_ccr;
+ }
+ }
+ default:
+ return -EINVAL;
+ }
+
+update_ccr:
+ regs->ccr &= ~(15 << ((7 - ((speinsn >> 23) & 0x7)) << 2));
+ regs->ccr |= (IR << ((7 - ((speinsn >> 23) & 0x7)) << 2));
+
+update_regs:
+ __FPU_FPSCR &= ~FP_EX_MASK;
+ __FPU_FPSCR |= (FP_CUR_EXCEPTIONS & FP_EX_MASK);
+ mtspr(SPRN_SPEFSCR, __FPU_FPSCR);
+
+ current->thread.evr[fc] = vc.wp[0];
+ regs->gpr[fc] = vc.wp[1];
+
+#ifdef DEBUG
+ printk("ccr = %08lx\n", regs->ccr);
+ printk("cur exceptions = %08x spefscr = %08lx\n",
+ FP_CUR_EXCEPTIONS, __FPU_FPSCR);
+ printk("vc: %08x %08x\n", vc.wp[0], vc.wp[1]);
+ printk("va: %08x %08x\n", va.wp[0], va.wp[1]);
+ printk("vb: %08x %08x\n", vb.wp[0], vb.wp[1]);
+#endif
+
+ return 0;
+
+illegal:
+ printk(KERN_ERR "\nOoops! IEEE-754 compliance handler encountered un-supported instruction.\ninst code: %08lx\n", speinsn);
+ return -ENOSYS;
+}
+
+int speround_handler(struct pt_regs *regs)
+{
+ union dw_union fgpr;
+ int s_lo, s_hi;
+ unsigned long speinsn, type, fc;
+
+ if (get_user(speinsn, (unsigned int __user *) regs->nip))
+ return -EFAULT;
+ if ((speinsn >> 26) != 4)
+ return -EINVAL; /* not an spe instruction */
+
+ type = insn_type(speinsn & 0x7ff);
+ if (type == XCR) return -ENOSYS;
+
+ fc = (speinsn >> 21) & 0x1f;
+ s_lo = regs->gpr[fc] & SIGN_BIT_S;
+ s_hi = current->thread.evr[fc] & SIGN_BIT_S;
+ fgpr.wp[0] = current->thread.evr[fc];
+ fgpr.wp[1] = regs->gpr[fc];
+
+ __FPU_FPSCR = mfspr(SPRN_SPEFSCR);
+
+ switch ((speinsn >> 5) & 0x7) {
+ /* Since SPE instructions on E500 core can handle round to nearest
+ * and round toward zero in an IEEE-754 compliant way, we just need
+ * to handle round toward +Inf and round toward -Inf by software.
+ */
+ case SPFP:
+ if ((FP_ROUNDMODE) == FP_RND_PINF) {
+ if (!s_lo) fgpr.wp[1]++; /* Z > 0, choose Z1 */
+ } else { /* round to -Inf */
+ if (s_lo) fgpr.wp[1]++; /* Z < 0, choose Z2 */
+ }
+ break;
+
+ case DPFP:
+ if (FP_ROUNDMODE == FP_RND_PINF) {
+ if (!s_hi) fgpr.dp[0]++; /* Z > 0, choose Z1 */
+ } else { /* round to -Inf */
+ if (s_hi) fgpr.dp[0]++; /* Z < 0, choose Z2 */
+ }
+ break;
+
+ case VCT:
+ if (FP_ROUNDMODE == FP_RND_PINF) {
+ if (!s_lo) fgpr.wp[1]++; /* Z_low > 0, choose Z1 */
+ if (!s_hi) fgpr.wp[0]++; /* Z_high word > 0, choose Z1 */
+ } else { /* round to -Inf */
+ if (s_lo) fgpr.wp[1]++; /* Z_low < 0, choose Z2 */
+ if (s_hi) fgpr.wp[0]++; /* Z_high < 0, choose Z2 */
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ current->thread.evr[fc] = fgpr.wp[0];
+ regs->gpr[fc] = fgpr.wp[1];
+
+ return 0;
+}
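A standalone C sketch of the instruction decode and CR update that do_spe_mathemu() performs above; the struct and helper names are illustrative, the bit positions mirror the code:

    #include <stdint.h>

    struct spe_fields {
            unsigned int func;       /* minor opcode: insn & 0x7ff           */
            unsigned int fc, fa, fb; /* rD/rA/rB register numbers            */
            unsigned int src;        /* 4 = vector, 6 = single, 7 = double   */
            unsigned int crfd;       /* CR field used by the compare ops     */
    };

    static struct spe_fields spe_decode(uint32_t insn)
    {
            struct spe_fields f = {
                    .func = insn & 0x7ff,
                    .fc   = (insn >> 21) & 0x1f,
                    .fa   = (insn >> 16) & 0x1f,
                    .fb   = (insn >> 11) & 0x1f,
                    .src  = (insn >> 5)  & 0x7,
                    .crfd = (insn >> 23) & 0x7,
            };
            return f;
    }

    /* update_ccr places the 4-bit compare result IR into CR field crfd.
     * Field 0 is the most significant nibble of CCR, hence (7 - crfd) * 4. */
    static uint32_t spe_update_ccr(uint32_t ccr, unsigned int crfd, unsigned int ir)
    {
            unsigned int shift = (7 - crfd) << 2;

            ccr &= ~(0xfu << shift);
            ccr |= (ir & 0xfu) << shift;
            return ccr;
    }

Note that fc and crfd overlap in the instruction word: the compare instructions encode crD in the top three bits of the rD slot, which is why update_ccr can fall through into update_regs and write an unchanged vc back to evr[fc]/gpr[fc].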
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index e7392b45a5ef..953cc4a1cde5 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -6,17 +6,19 @@ ifeq ($(CONFIG_PPC64),y)
EXTRA_CFLAGS += -mno-minimal-toc
endif
-obj-y := fault.o mem.o \
+obj-y := fault.o mem.o pgtable.o \
init_$(CONFIG_WORD_SIZE).o \
- pgtable_$(CONFIG_WORD_SIZE).o \
- mmu_context_$(CONFIG_WORD_SIZE).o
+ pgtable_$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
+ tlb_nohash_low.o
hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o
obj-$(CONFIG_PPC64) += hash_utils_64.o \
slb_low.o slb.o stab.o \
gup.o mmap.o $(hash-y)
obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o
obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \
- tlb_$(CONFIG_WORD_SIZE).o
+ tlb_hash$(CONFIG_WORD_SIZE).o \
+ mmu_context_hash$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_40x) += 40x_mmu.o
obj-$(CONFIG_44x) += 44x_mmu.o
obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 565b7a237c84..91c7b8636b8a 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -30,6 +30,7 @@
#include <linux/kprobes.h>
#include <linux/kdebug.h>
+#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
@@ -283,7 +284,7 @@ good_area:
}
pte_update(ptep, 0, _PAGE_HWEXEC |
_PAGE_ACCESSED);
- _tlbie(address, mm->context.id);
+ local_flush_tlb_page(vma, address);
pte_unmap_unlock(ptep, ptl);
up_read(&mm->mmap_sem);
return 0;
@@ -318,9 +319,16 @@ good_area:
goto do_sigbus;
BUG();
}
- if (ret & VM_FAULT_MAJOR)
+ if (ret & VM_FAULT_MAJOR) {
current->maj_flt++;
- else
+#ifdef CONFIG_PPC_SMLPAR
+ if (firmware_has_feature(FW_FEATURE_CMO)) {
+ preempt_disable();
+ get_lppaca()->page_ins += (1 << PAGE_FACTOR);
+ preempt_enable();
+ }
+#endif
+ } else
current->min_flt++;
up_read(&mm->mmap_sem);
return 0;
@@ -339,7 +347,7 @@ bad_area_nosemaphore:
&& printk_ratelimit())
printk(KERN_CRIT "kernel tried to execute NX-protected"
" page (%lx) - exploit attempt? (uid: %d)\n",
- address, current->uid);
+ address, current_uid());
return SIGSEGV;
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 7bffb70b9fe2..67850ec9feb3 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -36,36 +36,6 @@ mmu_hash_lock:
#endif /* CONFIG_SMP */
/*
- * Sync CPUs with hash_page taking & releasing the hash
- * table lock
- */
-#ifdef CONFIG_SMP
- .text
-_GLOBAL(hash_page_sync)
- mfmsr r10
- rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
- mtmsr r0
- lis r8,mmu_hash_lock@h
- ori r8,r8,mmu_hash_lock@l
- lis r0,0x0fff
- b 10f
-11: lwz r6,0(r8)
- cmpwi 0,r6,0
- bne 11b
-10: lwarx r6,0,r8
- cmpwi 0,r6,0
- bne- 11b
- stwcx. r0,0,r8
- bne- 10b
- isync
- eieio
- li r0,0
- stw r0,0(r8)
- mtmsr r10
- blr
-#endif /* CONFIG_SMP */
-
-/*
* Load a PTE into the hash table, if possible.
* The address is in r4, and r3 contains an access flag:
* _PAGE_RW (0x400) if a write.
@@ -353,8 +323,8 @@ _GLOBAL(create_hpte)
ori r8,r8,0xe14 /* clear out reserved bits and M */
andc r8,r5,r8 /* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
- ori r8,r8,_PAGE_COHERENT /* set M (coherence required) */
-END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
+ rlwinm r8,r8,0,~_PAGE_COHERENT /* clear M (coherence not required) */
+END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
#ifdef CONFIG_PTE_64BIT
/* Put the XPN bits into the PTE */
rlwimi r8,r10,8,20,22
@@ -663,3 +633,80 @@ _GLOBAL(flush_hash_patch_B)
SYNC_601
isync
blr
+
+/*
+ * Flush an entry from the TLB
+ */
+_GLOBAL(_tlbie)
+#ifdef CONFIG_SMP
+ rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
+ lwz r8,TI_CPU(r8)
+ oris r8,r8,11
+ mfmsr r10
+ SYNC
+ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
+ rlwinm r0,r0,0,28,26 /* clear DR */
+ mtmsr r0
+ SYNC_601
+ isync
+ lis r9,mmu_hash_lock@h
+ ori r9,r9,mmu_hash_lock@l
+ tophys(r9,r9)
+10: lwarx r7,0,r9
+ cmpwi 0,r7,0
+ bne- 10b
+ stwcx. r8,0,r9
+ bne- 10b
+ eieio
+ tlbie r3
+ sync
+ TLBSYNC
+ li r0,0
+ stw r0,0(r9) /* clear mmu_hash_lock */
+ mtmsr r10
+ SYNC_601
+ isync
+#else /* CONFIG_SMP */
+ tlbie r3
+ sync
+#endif /* CONFIG_SMP */
+ blr
+
+/*
+ * Flush the entire TLB. 603/603e only
+ */
+_GLOBAL(_tlbia)
+#if defined(CONFIG_SMP)
+ rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
+ lwz r8,TI_CPU(r8)
+ oris r8,r8,10
+ mfmsr r10
+ SYNC
+ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
+ rlwinm r0,r0,0,28,26 /* clear DR */
+ mtmsr r0
+ SYNC_601
+ isync
+ lis r9,mmu_hash_lock@h
+ ori r9,r9,mmu_hash_lock@l
+ tophys(r9,r9)
+10: lwarx r7,0,r9
+ cmpwi 0,r7,0
+ bne- 10b
+ stwcx. r8,0,r9
+ bne- 10b
+ sync
+ tlbia
+ sync
+ TLBSYNC
+ li r0,0
+ stw r0,0(r9) /* clear mmu_hash_lock */
+ mtmsr r10
+ SYNC_601
+ isync
+#else /* CONFIG_SMP */
+ sync
+ tlbia
+ sync
+#endif /* CONFIG_SMP */
+ blr
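The _tlbie/_tlbia implementations moved here serialise against hash_page() by taking mmu_hash_lock with a lwarx/stwcx. reservation loop before invalidating. An illustrative C analogue of that locking sequence — the real code must run with interrupts and data translation disabled and with the lock address converted to physical (tophys), which is why it stays in assembly:

    /* Illustrative only; GCC atomic builtins stand in for lwarx/stwcx. */
    static void hash_lock_acquire(volatile unsigned long *lock, unsigned long token)
    {
            /* token is non-zero and identifies the holder; the assembly
             * builds it from the CPU number with an oris tag. */
            while (__sync_val_compare_and_swap(lock, 0UL, token) != 0UL)
                    ;       /* spin until the previous holder stores 0 */
    }

    static void hash_lock_release(volatile unsigned long *lock)
    {
            __sync_synchronize();   /* order the tlbie/tlbsync before the store */
            *lock = 0;
    }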
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index f0c3b88d50fa..9920d6a7cf29 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -53,8 +53,7 @@ unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */
/* Subtract one from array size because we don't need a cache for 4K since
 * it is not a huge page size */
-#define huge_pgtable_cache(psize) (pgtable_cache[HUGEPTE_CACHE_NUM \
- + psize-1])
+#define HUGE_PGTABLE_INDEX(psize) (HUGEPTE_CACHE_NUM + psize - 1)
#define HUGEPTE_CACHE_NAME(psize) (huge_pgtable_cache_name[psize])
static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = {
@@ -113,7 +112,7 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
unsigned long address, unsigned int psize)
{
- pte_t *new = kmem_cache_zalloc(huge_pgtable_cache(psize),
+ pte_t *new = kmem_cache_zalloc(pgtable_cache[HUGE_PGTABLE_INDEX(psize)],
GFP_KERNEL|__GFP_REPEAT);
if (! new)
@@ -121,7 +120,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
spin_lock(&mm->page_table_lock);
if (!hugepd_none(*hpdp))
- kmem_cache_free(huge_pgtable_cache(psize), new);
+ kmem_cache_free(pgtable_cache[HUGE_PGTABLE_INDEX(psize)], new);
else
hpdp->pd = (unsigned long)new | HUGEPD_OK;
spin_unlock(&mm->page_table_lock);
@@ -513,6 +512,13 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}
+unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+ unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
+
+ return 1UL << mmu_psize_to_shift(psize);
+}
+
/*
* Called by asm hashtable.S for doing lazy icache flush
*/
@@ -763,13 +769,14 @@ static int __init hugetlbpage_init(void)
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
if (mmu_huge_psizes[psize]) {
- huge_pgtable_cache(psize) = kmem_cache_create(
- HUGEPTE_CACHE_NAME(psize),
- HUGEPTE_TABLE_SIZE(psize),
- HUGEPTE_TABLE_SIZE(psize),
- 0,
- NULL);
- if (!huge_pgtable_cache(psize))
+ pgtable_cache[HUGE_PGTABLE_INDEX(psize)] =
+ kmem_cache_create(
+ HUGEPTE_CACHE_NAME(psize),
+ HUGEPTE_TABLE_SIZE(psize),
+ HUGEPTE_TABLE_SIZE(psize),
+ 0,
+ NULL);
+ if (!pgtable_cache[HUGE_PGTABLE_INDEX(psize)])
panic("hugetlbpage_init(): could not create %s"\
"\n", HUGEPTE_CACHE_NAME(psize));
}
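The hugetlb changes above replace the old huge_pgtable_cache(psize) macro, which expanded to an lvalue into pgtable_cache[], with a macro that computes only the index, so every array access is visible at the call site. Side by side, using the names from this file (HUGEPTE_CACHE_NUM comes from the surrounding code):

    /* Old form: the array access is hidden and the macro doubles as an lvalue. */
    #define huge_pgtable_cache(psize) \
            (pgtable_cache[HUGEPTE_CACHE_NUM + (psize) - 1])

    /* New form: only the index; callers spell out pgtable_cache[...] as in
     * the hunks above. */
    #define HUGE_PGTABLE_INDEX(psize)  (HUGEPTE_CACHE_NUM + (psize) - 1)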
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 388ceda632f3..666a5e8a5be1 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -35,7 +35,6 @@
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
-#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
@@ -49,7 +48,7 @@
#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
-#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE))
+#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET))
#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
#endif
#endif
@@ -180,9 +179,6 @@ void __init MMU_init(void)
if (ppc_md.progress)
ppc_md.progress("MMU:setio", 0x302);
- /* Initialize the context management stuff */
- mmu_context_init();
-
if (ppc_md.progress)
ppc_md.progress("MMU:exit", 0x211);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b9e1a1da6e52..f00f09a77f12 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -102,8 +102,8 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
if (!page_is_ram(pfn))
- vma_prot = __pgprot(pgprot_val(vma_prot)
- | _PAGE_GUARDED | _PAGE_NO_CACHE);
+ vma_prot = pgprot_noncached(vma_prot);
+
return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
@@ -132,7 +132,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
/* this should work for most non-highmem platforms */
zone = pgdata->node_zones;
- return __add_pages(zone, start_pfn, nr_pages);
+ return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
@@ -488,7 +488,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
* we invalidate the TLB here, thus avoiding dcbst
* misbehaviour.
*/
- _tlbie(address, 0 /* 8xx doesn't care about PID */);
+ _tlbil_va(address, 0 /* 8xx doesn't care about PID */);
#endif
/* The _PAGE_USER test should really be _PAGE_EXEC, but
* older glibc versions execute some code from no-exec
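Two small interface cleanups in mem.c: arch_add_memory() follows the new __add_pages() signature, which now takes the node id, and phys_mem_access_prot() uses pgprot_noncached() instead of or-ing the attribute bits by hand. Assuming the usual powerpc definition of that helper, the replacement is equivalent to the removed lines:

    /* Assumed definition (see asm/pgtable-ppc32.h / -ppc64.h): folds in the
     * same two bits the open-coded version set, so behaviour is unchanged
     * for non-RAM pfns. */
    #define pgprot_noncached(prot) \
            (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))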
diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c
deleted file mode 100644
index cc32ba41d900..000000000000
--- a/arch/powerpc/mm/mmu_context_32.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * This file contains the routines for handling the MMU on those
- * PowerPC implementations where the MMU substantially follows the
- * architecture specification. This includes the 6xx, 7xx, 7xxx,
- * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
- * -- paulus
- *
- * Derived from arch/ppc/mm/init.c:
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
- * and Cort Dougan (PReP) (cort@cs.nmt.edu)
- * Copyright (C) 1996 Paul Mackerras
- *
- * Derived from "arch/i386/mm/init.c"
- * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/mm.h>
-#include <linux/init.h>
-
-#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
-
-unsigned long next_mmu_context;
-unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
-#ifdef FEW_CONTEXTS
-atomic_t nr_free_contexts;
-struct mm_struct *context_mm[LAST_CONTEXT+1];
-void steal_context(void);
-#endif /* FEW_CONTEXTS */
-
-/*
- * Initialize the context management stuff.
- */
-void __init
-mmu_context_init(void)
-{
- /*
- * Some processors have too few contexts to reserve one for
- * init_mm, and require using context 0 for a normal task.
- * Other processors reserve the use of context zero for the kernel.
- * This code assumes FIRST_CONTEXT < 32.
- */
- context_map[0] = (1 << FIRST_CONTEXT) - 1;
- next_mmu_context = FIRST_CONTEXT;
-#ifdef FEW_CONTEXTS
- atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
-#endif /* FEW_CONTEXTS */
-}
-
-#ifdef FEW_CONTEXTS
-/*
- * Steal a context from a task that has one at the moment.
- * This is only used on 8xx and 4xx and we presently assume that
- * they don't do SMP. If they do then this will have to check
- * whether the MM we steal is in use.
- * We also assume that this is only used on systems that don't
- * use an MMU hash table - this is true for 8xx and 4xx.
- * This isn't an LRU system, it just frees up each context in
- * turn (sort-of pseudo-random replacement :). This would be the
- * place to implement an LRU scheme if anyone was motivated to do it.
- * -- paulus
- */
-void
-steal_context(void)
-{
- struct mm_struct *mm;
-
- /* free up context `next_mmu_context' */
- /* if we shouldn't free context 0, don't... */
- if (next_mmu_context < FIRST_CONTEXT)
- next_mmu_context = FIRST_CONTEXT;
- mm = context_mm[next_mmu_context];
- flush_tlb_mm(mm);
- destroy_context(mm);
-}
-#endif /* FEW_CONTEXTS */
diff --git a/arch/powerpc/mm/mmu_context_hash32.c b/arch/powerpc/mm/mmu_context_hash32.c
new file mode 100644
index 000000000000..0dfba2bf7f31
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context_hash32.c
@@ -0,0 +1,103 @@
+/*
+ * This file contains the routines for handling the MMU on those
+ * PowerPC implementations where the MMU substantially follows the
+ * architecture specification. This includes the 6xx, 7xx, 7xxx,
+ * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
+ * -- paulus
+ *
+ * Derived from arch/ppc/mm/init.c:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+/*
+ * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
+ * (virtual segment identifiers) for each context. Although the
+ * hardware supports 24-bit VSIDs, and thus >1 million contexts,
+ * we only use 32,768 of them. That is ample, since there can be
+ * at most around 30,000 tasks in the system anyway, and it means
+ * that we can use a bitmap to indicate which contexts are in use.
+ * Using a bitmap means that we entirely avoid all of the problems
+ * that we used to have when the context number overflowed,
+ * particularly on SMP systems.
+ * -- paulus.
+ */
+#define NO_CONTEXT ((unsigned long) -1)
+#define LAST_CONTEXT 32767
+#define FIRST_CONTEXT 1
+
+/*
+ * This function defines the mapping from contexts to VSIDs (virtual
+ * segment IDs). We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table. Note, if this
+ * function is changed then arch/ppc/mm/hashtable.S will have to be
+ * changed to correspond.
+ *
+ *
+ * CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
+ * & 0xffffff)
+ */
+
+static unsigned long next_mmu_context;
+static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+
+
+/*
+ * Set up the context for a new address space.
+ */
+int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+ unsigned long ctx = next_mmu_context;
+
+ while (test_and_set_bit(ctx, context_map)) {
+ ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
+ if (ctx > LAST_CONTEXT)
+ ctx = 0;
+ }
+ next_mmu_context = (ctx + 1) & LAST_CONTEXT;
+ mm->context.id = ctx;
+
+ return 0;
+}
+
+/*
+ * We're finished using the context for an address space.
+ */
+void destroy_context(struct mm_struct *mm)
+{
+ preempt_disable();
+ if (mm->context.id != NO_CONTEXT) {
+ clear_bit(mm->context.id, context_map);
+ mm->context.id = NO_CONTEXT;
+ }
+ preempt_enable();
+}
+
+/*
+ * Initialize the context management stuff.
+ */
+void __init mmu_context_init(void)
+{
+ /* Reserve context 0 for kernel use */
+ context_map[0] = (1 << FIRST_CONTEXT) - 1;
+ next_mmu_context = FIRST_CONTEXT;
+}
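The CTX_TO_VSID() skew documented in the comment block above can be checked in isolation; a small user-space example that prints the 16 VSIDs one context maps to (the formula is copied from the comment, the driver around it is illustrative):

    #include <stdio.h>

    static unsigned long ctx_to_vsid(unsigned long ctx, unsigned long va)
    {
            return (ctx * (897 * 16) + (va >> 28) * 0x111) & 0xffffff;
    }

    int main(void)
    {
            unsigned long esid;

            /* Context 1 is the first user context (FIRST_CONTEXT above). */
            for (esid = 0; esid < 16; esid++)
                    printf("ESID %2lu -> VSID 0x%06lx\n",
                           esid, ctx_to_vsid(1, esid << 28));
            return 0;
    }

The 897 * 16 multiplier spreads consecutive contexts far apart in the 24-bit VSID space, and the 0x111 term spreads the 16 segments of one context, which is what keeps the MMU hash table entries from clustering.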
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 1db38ba1f544..dbeb86ac90cd 100644
--- a/arch/powerpc/mm/mmu_context_64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -24,6 +24,14 @@
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDR(mmu_context_idr);
+/*
+ * The proto-VSID space has 2^35 - 1 segments available for user mappings.
+ * Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
+ * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
+ */
+#define NO_CONTEXT 0
+#define MAX_CONTEXT ((1UL << 19) - 1)
+
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
int index;
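The context-count arithmetic in the new comment, spelled out (this assumes a 64-bit host for the shifts; it reproduces why MAX_CONTEXT is (1UL << 19) - 1):

    #include <stdio.h>

    int main(void)
    {
            unsigned long segment_bytes    = 1UL << 28;
            unsigned long context_bytes    = 1UL << 44;
            unsigned long user_segments    = (1UL << 35) - 1;
            unsigned long segs_per_context = context_bytes / segment_bytes;   /* 1 << 16 */
            unsigned long max_context      = user_segments / segs_per_context;

            printf("segments per context: %lu\n", segs_per_context);
            printf("max context id:       %lu\n", max_context);  /* 524287 = (1 << 19) - 1 */
            return 0;
    }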
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
new file mode 100644
index 000000000000..52a0cfc38b64
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -0,0 +1,397 @@
+/*
+ * This file contains the routines for handling the MMU on those
+ * PowerPC implementations where the MMU is not using the hash
+ * table, such as 8xx, 4xx, BookE's etc...
+ *
+ * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
+ * IBM Corp.
+ *
+ * Derived from previous arch/powerpc/mm/mmu_context.c
+ * and arch/powerpc/include/asm/mmu_context.h
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * TODO:
+ *
+ * - The global context lock will not scale very well
+ * - The maps should be dynamically allocated to allow for processors
+ * that support more PID bits at runtime
+ * - Implement flush_tlb_mm() by making the context stale and picking
+ * a new one
+ * - More aggressively clear stale map bits and maybe find some way to
+ * also clear mm->cpu_vm_mask bits when processes are migrated
+ */
+
+#undef DEBUG
+#define DEBUG_STEAL_ONLY
+#undef DEBUG_MAP_CONSISTENCY
+/*#define DEBUG_CLAMP_LAST_CONTEXT 15 */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/bootmem.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+static unsigned int first_context, last_context;
+static unsigned int next_context, nr_free_contexts;
+static unsigned long *context_map;
+static unsigned long *stale_map[NR_CPUS];
+static struct mm_struct **context_mm;
+static spinlock_t context_lock = SPIN_LOCK_UNLOCKED;
+
+#define CTX_MAP_SIZE \
+ (sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
+
+
+/* Steal a context from a task that has one at the moment.
+ *
+ * This is used when we are running out of available PID numbers
+ * on the processors.
+ *
+ * This isn't an LRU system, it just frees up each context in
+ * turn (sort-of pseudo-random replacement :). This would be the
+ * place to implement an LRU scheme if anyone was motivated to do it.
+ * -- paulus
+ *
+ * For context stealing, we use a slightly different approach for
+ * SMP and UP. Basically, the UP one is simpler and doesn't use
+ * the stale map as we can just flush the local CPU
+ * -- benh
+ */
+#ifdef CONFIG_SMP
+static unsigned int steal_context_smp(unsigned int id)
+{
+ struct mm_struct *mm;
+ unsigned int cpu, max;
+
+ again:
+ max = last_context - first_context;
+
+ /* Attempt to free next_context first and then loop until we manage */
+ while (max--) {
+ /* Pick up the victim mm */
+ mm = context_mm[id];
+
+ /* We have a candidate victim, check if it's active, on SMP
+ * we cannot steal active contexts
+ */
+ if (mm->context.active) {
+ id++;
+ if (id > last_context)
+ id = first_context;
+ continue;
+ }
+ pr_debug("[%d] steal context %d from mm @%p\n",
+ smp_processor_id(), id, mm);
+
+ /* Mark this mm as having no context anymore */
+ mm->context.id = MMU_NO_CONTEXT;
+
+ /* Mark it stale on all CPUs that used this mm */
+ for_each_cpu_mask_nr(cpu, mm->cpu_vm_mask)
+ __set_bit(id, stale_map[cpu]);
+ return id;
+ }
+
+ /* This will happen if you have more CPUs than available contexts,
+ * all we can do here is wait a bit and try again
+ */
+ spin_unlock(&context_lock);
+ cpu_relax();
+ spin_lock(&context_lock);
+ goto again;
+}
+#endif /* CONFIG_SMP */
+
+/* Note that this will also be called on SMP if all other CPUs are
+ * offlined, which means that it may be called for cpu != 0. For
+ * this to work, we somewhat assume that CPUs that are onlined
+ * come up with a fully clean TLB (or are cleaned when offlined)
+ */
+static unsigned int steal_context_up(unsigned int id)
+{
+ struct mm_struct *mm;
+ int cpu = smp_processor_id();
+
+ /* Pick up the victim mm */
+ mm = context_mm[id];
+
+ pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm);
+
+ /* Mark this mm as having no context anymore */
+ mm->context.id = MMU_NO_CONTEXT;
+
+ /* Flush the TLB for that context */
+ local_flush_tlb_mm(mm);
+
+ /* XXX This clear should ultimately be part of local_flush_tlb_mm */
+ __clear_bit(id, stale_map[cpu]);
+
+ return id;
+}
+
+#ifdef DEBUG_MAP_CONSISTENCY
+static void context_check_map(void)
+{
+ unsigned int id, nrf, nact;
+
+ nrf = nact = 0;
+ for (id = first_context; id <= last_context; id++) {
+ int used = test_bit(id, context_map);
+ if (!used)
+ nrf++;
+ if (used != (context_mm[id] != NULL))
+ pr_err("MMU: Context %d is %s and MM is %p !\n",
+ id, used ? "used" : "free", context_mm[id]);
+ if (context_mm[id] != NULL)
+ nact += context_mm[id]->context.active;
+ }
+ if (nrf != nr_free_contexts) {
+ pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
+ nr_free_contexts, nrf);
+ nr_free_contexts = nrf;
+ }
+ if (nact > num_online_cpus())
+ pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
+ nact, num_online_cpus());
+ if (first_context > 0 && !test_bit(0, context_map))
+ pr_err("MMU: Context 0 has been freed !!!\n");
+}
+#else
+static void context_check_map(void) { }
+#endif
+
+void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
+{
+ unsigned int id, cpu = smp_processor_id();
+ unsigned long *map;
+
+ /* No lockless fast path .. yet */
+ spin_lock(&context_lock);
+
+#ifndef DEBUG_STEAL_ONLY
+ pr_debug("[%d] activating context for mm @%p, active=%d, id=%d\n",
+ cpu, next, next->context.active, next->context.id);
+#endif
+
+#ifdef CONFIG_SMP
+ /* Mark us active and the previous one not anymore */
+ next->context.active++;
+ if (prev) {
+#ifndef DEBUG_STEAL_ONLY
+ pr_debug(" old context %p active was: %d\n",
+ prev, prev->context.active);
+#endif
+ WARN_ON(prev->context.active < 1);
+ prev->context.active--;
+ }
+#endif /* CONFIG_SMP */
+
+ /* If we already have a valid assigned context, skip all that */
+ id = next->context.id;
+ if (likely(id != MMU_NO_CONTEXT))
+ goto ctxt_ok;
+
+ /* We really don't have a context, let's try to acquire one */
+ id = next_context;
+ if (id > last_context)
+ id = first_context;
+ map = context_map;
+
+ /* No more free contexts, let's try to steal one */
+ if (nr_free_contexts == 0) {
+#ifdef CONFIG_SMP
+ if (num_online_cpus() > 1) {
+ id = steal_context_smp(id);
+ goto stolen;
+ }
+#endif /* CONFIG_SMP */
+ id = steal_context_up(id);
+ goto stolen;
+ }
+ nr_free_contexts--;
+
+ /* We know there's at least one free context, try to find it */
+ while (__test_and_set_bit(id, map)) {
+ id = find_next_zero_bit(map, last_context+1, id);
+ if (id > last_context)
+ id = first_context;
+ }
+ stolen:
+ next_context = id + 1;
+ context_mm[id] = next;
+ next->context.id = id;
+
+#ifndef DEBUG_STEAL_ONLY
+ pr_debug("[%d] picked up new id %d, nrf is now %d\n",
+ cpu, id, nr_free_contexts);
+#endif
+
+ context_check_map();
+ ctxt_ok:
+
+ /* If that context got marked stale on this CPU, then flush the
+ * local TLB for it and unmark it before we use it
+ */
+ if (test_bit(id, stale_map[cpu])) {
+ pr_debug("[%d] flushing stale context %d for mm @%p !\n",
+ cpu, id, next);
+ local_flush_tlb_mm(next);
+
+ /* XXX This clear should ultimately be part of local_flush_tlb_mm */
+ __clear_bit(id, stale_map[cpu]);
+ }
+
+ /* Flick the MMU and release lock */
+ set_context(id, next->pgd);
+ spin_unlock(&context_lock);
+}
+
+/*
+ * Set up the context for a new address space.
+ */
+int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+ mm->context.id = MMU_NO_CONTEXT;
+ mm->context.active = 0;
+
+ return 0;
+}
+
+/*
+ * We're finished using the context for an address space.
+ */
+void destroy_context(struct mm_struct *mm)
+{
+ unsigned int id;
+
+ if (mm->context.id == MMU_NO_CONTEXT)
+ return;
+
+ WARN_ON(mm->context.active != 0);
+
+ spin_lock(&context_lock);
+ id = mm->context.id;
+ if (id != MMU_NO_CONTEXT) {
+ __clear_bit(id, context_map);
+ mm->context.id = MMU_NO_CONTEXT;
+#ifdef DEBUG_MAP_CONSISTENCY
+ mm->context.active = 0;
+ context_mm[id] = NULL;
+#endif
+ nr_free_contexts++;
+ }
+ spin_unlock(&context_lock);
+}
+
+#ifdef CONFIG_SMP
+
+static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned int)(long)hcpu;
+
+ /* We don't touch the CPU 0 map; it's allocated at boot and kept
+ * around forever
+ */
+ if (cpu == 0)
+ return NOTIFY_OK;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ pr_debug("MMU: Allocating stale context map for CPU %d\n", cpu);
+ stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
+ break;
+#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ pr_debug("MMU: Freeing stale context map for CPU %d\n", cpu);
+ kfree(stale_map[cpu]);
+ stale_map[cpu] = NULL;
+ break;
+#endif
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
+ .notifier_call = mmu_context_cpu_notify,
+};
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Initialize the context management stuff.
+ */
+void __init mmu_context_init(void)
+{
+ /* Mark init_mm as being active on all possible CPUs since
+ * we'll get called with prev == init_mm the first time
+ * we schedule on a given CPU
+ */
+ init_mm.context.active = NR_CPUS;
+
+ /*
+ * The MPC8xx has only 16 contexts. We rotate through them on each
+ * task switch. A better way would be to keep track of tasks that
+ * own contexts, and implement an LRU usage. That way very active
+ * tasks don't always have to pay the TLB reload overhead. The
+ * kernel pages are mapped shared, so the kernel can run on behalf
+ * of any task that makes a kernel entry. Shared does not mean they
+ * are not protected, just that the ASID comparison is not performed.
+ * -- Dan
+ *
+ * The IBM4xx has 256 contexts, so we can just rotate through these
+ * as a way of "switching" contexts. If the TID of the TLB is zero,
+ * the PID/TID comparison is disabled, so we can use a TID of zero
+ * to represent all kernel pages as shared among all contexts.
+ * -- Dan
+ */
+ if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
+ first_context = 0;
+ last_context = 15;
+ } else {
+ first_context = 1;
+ last_context = 255;
+ }
+
+#ifdef DEBUG_CLAMP_LAST_CONTEXT
+ last_context = DEBUG_CLAMP_LAST_CONTEXT;
+#endif
+ /*
+ * Allocate the maps used by context management
+ */
+ context_map = alloc_bootmem(CTX_MAP_SIZE);
+ context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
+ stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);
+
+#ifdef CONFIG_SMP
+ register_cpu_notifier(&mmu_context_cpu_nb);
+#endif
+
+ printk(KERN_INFO
+ "MMU: Allocated %d bytes of context maps for %d contexts\n",
+ 2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
+ last_context - first_context + 1);
+
+ /*
+ * Some processors have too few contexts to reserve one for
+ * init_mm, and require using context 0 for a normal task.
+ * Other processors reserve the use of context zero for the kernel.
+ * This code assumes first_context < 32.
+ */
+ context_map[0] = (1 << first_context) - 1;
+ next_context = first_context;
+ nr_free_contexts = last_context - first_context + 1;
+}
+
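mmu_context_init() above sizes all three bootmem allocations from CTX_MAP_SIZE, one bit per context id. A worked example of that sizing for the two ranges the function picks (user-space sketch; the byte counts depend on sizeof(unsigned long), which is 4 on the 32-bit targets this code runs on):

    #include <stdio.h>

    static unsigned long ctx_map_size(unsigned int last_context)
    {
            unsigned int bits_per_long = 8 * sizeof(unsigned long);

            return sizeof(unsigned long) * (last_context / bits_per_long + 1);
    }

    int main(void)
    {
            printf("8xx       (contexts 0..15):  %lu bytes\n", ctx_map_size(15));
            printf("4xx/BookE (contexts 1..255): %lu bytes\n", ctx_map_size(255));
            return 0;
    }

The stale_map bitmaps use the same size per CPU: a set bit means the context may still have entries in that CPU's TLB, and switch_mmu_context() flushes and clears the bit before reusing the id on that CPU.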
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index fab3cfad4099..ad123bced404 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -22,10 +22,58 @@
#include <asm/tlbflush.h>
#include <asm/mmu.h>
+#ifdef CONFIG_PPC_MMU_NOHASH
+
+/*
+ * On 40x and 8xx, we directly inline tlbia and tlbivax
+ */
+#if defined(CONFIG_40x) || defined(CONFIG_8xx)
+static inline void _tlbil_all(void)
+{
+ asm volatile ("sync; tlbia; isync" : : : "memory");
+}
+static inline void _tlbil_pid(unsigned int pid)
+{
+ asm volatile ("sync; tlbia; isync" : : : "memory");
+}
+#else /* CONFIG_40x || CONFIG_8xx */
+extern void _tlbil_all(void);
+extern void _tlbil_pid(unsigned int pid);
+#endif /* !(CONFIG_40x || CONFIG_8xx) */
+
+/*
+ * On 8xx, we directly inline tlbie, on others, it's extern
+ */
+#ifdef CONFIG_8xx
+static inline void _tlbil_va(unsigned long address, unsigned int pid)
+{
+ asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
+}
+#else /* CONFIG_8xx */
+extern void _tlbil_va(unsigned long address, unsigned int pid);
+#endif /* CONFIG_8xx */
+
+/*
+ * As of today, we don't support tlbivax broadcast on any
+ * implementation. When that changes, this will become
+ * an extern.
+ */
+static inline void _tlbivax_bcast(unsigned long address, unsigned int pid)
+{
+ BUG();
+}
+
+#else /* CONFIG_PPC_MMU_NOHASH */
+
extern void hash_preload(struct mm_struct *mm, unsigned long ea,
unsigned long access, unsigned long trap);
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
+
+#endif /* CONFIG_PPC_MMU_NOHASH */
+
#ifdef CONFIG_PPC32
extern void mapin_ram(void);
extern int map_page(unsigned long va, phys_addr_t pa, int flags);
@@ -58,17 +106,14 @@ extern phys_addr_t lowmem_end_addr;
* architectures. -- Dan
*/
#if defined(CONFIG_8xx)
-#define flush_HPTE(X, va, pg) _tlbie(va, 0 /* 8xx doesn't care about PID */)
#define MMU_init_hw() do { } while(0)
#define mmu_mapin_ram() (0UL)
#elif defined(CONFIG_4xx)
-#define flush_HPTE(pid, va, pg) _tlbie(va, pid)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
#elif defined(CONFIG_FSL_BOOKE)
-#define flush_HPTE(pid, va, pg) _tlbie(va, pid)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
extern void adjust_total_lowmem(void);
@@ -77,18 +122,4 @@ extern void adjust_total_lowmem(void);
/* anything 32-bit except 4xx or 8xx */
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
-
-/* Be careful....this needs to be updated if we ever encounter 603 SMPs,
- * which includes all new 82xx processors. We need tlbie/tlbsync here
- * in that case (I think). -- Dan.
- */
-static inline void flush_HPTE(unsigned context, unsigned long va,
- unsigned long pdval)
-{
- if ((Hash != 0) &&
- cpu_has_feature(CPU_FTR_HPTE_TABLE))
- flush_hash_pages(0, va, pdval, 1);
- else
- _tlbie(va);
-}
#endif
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index cf81049e1e51..7393bd76d698 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -822,42 +822,50 @@ static void __init dump_numa_memory_topology(void)
* required. nid is the preferred node and end is the physical address of
* the highest address in the node.
*
- * Returns the physical address of the memory.
+ * Returns the virtual address of the memory.
*/
-static void __init *careful_allocation(int nid, unsigned long size,
+static void __init *careful_zallocation(int nid, unsigned long size,
unsigned long align,
unsigned long end_pfn)
{
+ void *ret;
int new_nid;
- unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+ unsigned long ret_paddr;
+
+ ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
/* retry over all memory */
- if (!ret)
- ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
+ if (!ret_paddr)
+ ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
- if (!ret)
- panic("numa.c: cannot allocate %lu bytes on node %d",
+ if (!ret_paddr)
+ panic("numa.c: cannot allocate %lu bytes for node %d",
size, nid);
+ ret = __va(ret_paddr);
+
/*
- * If the memory came from a previously allocated node, we must
- * retry with the bootmem allocator.
+ * We initialize the nodes in numeric order: 0, 1, 2...
+ * and hand over control from the LMB allocator to the
+ * bootmem allocator. If this function is called for
+ * node 5, then we know that all nodes <5 are using the
+ * bootmem allocator instead of the LMB allocator.
+ *
+ * So, check the nid from which this allocation came
+ * and double check to see if we need to use bootmem
+ * instead of the LMB. We don't free the LMB memory
+ * since it would be useless.
*/
- new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
+ new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
if (new_nid < nid) {
- ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
+ ret = __alloc_bootmem_node(NODE_DATA(new_nid),
size, align, 0);
- if (!ret)
- panic("numa.c: cannot allocate %lu bytes on node %d",
- size, new_nid);
-
- ret = __pa(ret);
-
- dbg("alloc_bootmem %lx %lx\n", ret, size);
+ dbg("alloc_bootmem %p %lx\n", ret, size);
}
- return (void *)ret;
+ memset(ret, 0, size);
+ return ret;
}
static struct notifier_block __cpuinitdata ppc64_numa_nb = {
@@ -952,7 +960,7 @@ void __init do_init_bootmem(void)
for_each_online_node(nid) {
unsigned long start_pfn, end_pfn;
- unsigned long bootmem_paddr;
+ void *bootmem_vaddr;
unsigned long bootmap_pages;
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
@@ -964,11 +972,9 @@ void __init do_init_bootmem(void)
* previous nodes' bootmem to be initialized and have
* all reserved areas marked.
*/
- NODE_DATA(nid) = careful_allocation(nid,
+ NODE_DATA(nid) = careful_zallocation(nid,
sizeof(struct pglist_data),
SMP_CACHE_BYTES, end_pfn);
- NODE_DATA(nid) = __va(NODE_DATA(nid));
- memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
dbg("node %d\n", nid);
dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
@@ -984,20 +990,20 @@ void __init do_init_bootmem(void)
dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
- bootmem_paddr = (unsigned long)careful_allocation(nid,
+ bootmem_vaddr = careful_zallocation(nid,
bootmap_pages << PAGE_SHIFT,
PAGE_SIZE, end_pfn);
- memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);
- dbg("bootmap_paddr = %lx\n", bootmem_paddr);
+ dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
- init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
+ init_bootmem_node(NODE_DATA(nid),
+ __pa(bootmem_vaddr) >> PAGE_SHIFT,
start_pfn, end_pfn);
free_bootmem_with_active_regions(nid, end_pfn);
/*
* Be very careful about moving this around. Future
- * calls to careful_allocation() depend on this getting
+ * calls to careful_zallocation() depend on this getting
* done correctly.
*/
mark_reserved_regions_for_nid(nid);
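
A tiny standalone sketch of the ordering argument in the careful_zallocation()
comment above: nodes are brought up in numeric order, so memory that lands on a
lower-numbered node has already been handed over to bootmem and must be
re-allocated there. The helper name and node numbers below are made up for
illustration.

	#include <stdio.h>

	/* Returns 1 when the bootmem allocator must be used instead of LMB. */
	static int must_use_bootmem(int requested_nid, int nid_memory_landed_on)
	{
		return nid_memory_landed_on < requested_nid;
	}

	int main(void)
	{
		printf("%d\n", must_use_bootmem(5, 3)); /* 1: node 3 is already on bootmem */
		printf("%d\n", must_use_bootmem(5, 5)); /* 0: keep the LMB allocation */
		return 0;
	}
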
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
new file mode 100644
index 000000000000..6d94116fdea1
--- /dev/null
+++ b/arch/powerpc/mm/pgtable.c
@@ -0,0 +1,117 @@
+/*
+ * This file contains common routines for dealing with free of page tables
+ *
+ * Derived from arch/powerpc/mm/tlb_64.c:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * Dave Engebretsen <engebret@us.ibm.com>
+ * Rework for PPC64 port.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
+static unsigned long pte_freelist_forced_free;
+
+struct pte_freelist_batch
+{
+ struct rcu_head rcu;
+ unsigned int index;
+ pgtable_free_t tables[0];
+};
+
+#define PTE_FREELIST_SIZE \
+ ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
+ / sizeof(pgtable_free_t))
+
+static void pte_free_smp_sync(void *arg)
+{
+ /* Do nothing, just ensure we sync with all CPUs */
+}
+
+/* This is only called when we are critically out of memory
+ * (and fail to get a page in pte_free_tlb).
+ */
+static void pgtable_free_now(pgtable_free_t pgf)
+{
+ pte_freelist_forced_free++;
+
+ smp_call_function(pte_free_smp_sync, NULL, 1);
+
+ pgtable_free(pgf);
+}
+
+static void pte_free_rcu_callback(struct rcu_head *head)
+{
+ struct pte_freelist_batch *batch =
+ container_of(head, struct pte_freelist_batch, rcu);
+ unsigned int i;
+
+ for (i = 0; i < batch->index; i++)
+ pgtable_free(batch->tables[i]);
+
+ free_page((unsigned long)batch);
+}
+
+static void pte_free_submit(struct pte_freelist_batch *batch)
+{
+ INIT_RCU_HEAD(&batch->rcu);
+ call_rcu(&batch->rcu, pte_free_rcu_callback);
+}
+
+void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
+{
+ /* This is safe since tlb_gather_mmu has disabled preemption */
+ cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
+ struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+
+ if (atomic_read(&tlb->mm->mm_users) < 2 ||
+ cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
+ pgtable_free(pgf);
+ return;
+ }
+
+ if (*batchp == NULL) {
+ *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
+ if (*batchp == NULL) {
+ pgtable_free_now(pgf);
+ return;
+ }
+ (*batchp)->index = 0;
+ }
+ (*batchp)->tables[(*batchp)->index++] = pgf;
+ if ((*batchp)->index == PTE_FREELIST_SIZE) {
+ pte_free_submit(*batchp);
+ *batchp = NULL;
+ }
+}
+
+void pte_free_finish(void)
+{
+ /* This is safe since tlb_gather_mmu has disabled preemption */
+ struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+
+ if (*batchp == NULL)
+ return;
+ pte_free_submit(*batchp);
+ *batchp = NULL;
+}
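
A standalone sketch of the PTE_FREELIST_SIZE arithmetic used above, i.e. how many
deferred page-table entries fit in one page after the batch header. struct rcu_head
is modelled as two pointers, pgtable_free_t as an unsigned long and PAGE_SIZE as
4096; all three sizes are assumptions for illustration only.

	#include <stdio.h>

	struct rcu_head_model {			/* stand-in, size only */
		void *next;
		void (*func)(struct rcu_head_model *);
	};

	struct batch_model {
		struct rcu_head_model rcu;
		unsigned int index;
		unsigned long tables[];		/* pgtable_free_t modelled as unsigned long */
	};

	int main(void)
	{
		unsigned long page_size = 4096;	/* assumed PAGE_SIZE */
		unsigned long n = (page_size - sizeof(struct batch_model))
				  / sizeof(unsigned long);

		printf("one batch page holds %lu entries\n", n);
		return 0;
	}
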
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index c31d6d26f0b5..22972cd83cc9 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -48,10 +48,6 @@ EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
extern char etext[], _stext[];
-#ifdef CONFIG_SMP
-extern void hash_page_sync(void);
-#endif
-
#ifdef HAVE_BATS
extern phys_addr_t v_mapped_by_bats(unsigned long va);
extern unsigned long p_mapped_by_bats(phys_addr_t pa);
@@ -72,24 +68,29 @@ extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
#define p_mapped_by_tlbcam(x) (0UL)
#endif /* HAVE_TLBCAM */
-#ifdef CONFIG_PTE_64BIT
-/* Some processors use an 8kB pgdir because they have 8-byte Linux PTEs. */
-#define PGDIR_ORDER 1
-#else
-#define PGDIR_ORDER 0
-#endif
+#define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT)
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret;
- ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
+ /* pgdir take page or two with 4K pages and a page fraction otherwise */
+#ifndef CONFIG_PPC_4K_PAGES
+ ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
+#else
+ ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+ PGDIR_ORDER - PAGE_SHIFT);
+#endif
return ret;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- free_pages((unsigned long)pgd, PGDIR_ORDER);
+#ifndef CONFIG_PPC_4K_PAGES
+ kfree((void *)pgd);
+#else
+ free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
+#endif
}
__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
@@ -125,23 +126,6 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
return ptepage;
}
-void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-#ifdef CONFIG_SMP
- hash_page_sync();
-#endif
- free_page((unsigned long)pte);
-}
-
-void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
-#ifdef CONFIG_SMP
- hash_page_sync();
-#endif
- pgtable_page_dtor(ptepage);
- __free_page(ptepage);
-}
-
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
@@ -194,6 +178,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
if (p < 16*1024*1024)
p += _ISA_MEM_BASE;
+#ifndef CONFIG_CRASH_DUMP
/*
* Don't allow anybody to remap normal RAM that we're using.
* mem_init() sets high_memory so only do the check after that.
@@ -203,6 +188,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
(unsigned long long)p, __builtin_return_address(0));
return NULL;
}
+#endif
if (size == 0)
return NULL;
@@ -280,7 +266,8 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
/* The PTE should never be already set nor present in the
* hash table
*/
- BUG_ON(pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE));
+ BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
+ flags);
set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags)));
}
@@ -288,7 +275,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
}
/*
- * Map in a big chunk of physical memory starting at KERNELBASE.
+ * Map in a big chunk of physical memory starting at PAGE_OFFSET.
*/
void __init mapin_ram(void)
{
@@ -297,7 +284,7 @@ void __init mapin_ram(void)
int ktext;
s = mmu_mapin_ram();
- v = KERNELBASE + s;
+ v = PAGE_OFFSET + s;
p = memstart_addr + s;
for (; s < total_lowmem; s += PAGE_SIZE) {
ktext = ((char *) v >= _stext && (char *) v < etext);
@@ -363,7 +350,11 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
return -EINVAL;
set_pte_at(&init_mm, address, kpte, mk_pte(page, prot));
wmb();
- flush_HPTE(0, address, pmd_val(*kpmd));
+#ifdef CONFIG_PPC_STD_MMU
+ flush_hash_pages(0, address, pmd_val(*kpmd), 1);
+#else
+ flush_tlb_page(NULL, address);
+#endif
pte_unmap(kpte);
return 0;
@@ -400,7 +391,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
#endif /* CONFIG_DEBUG_PAGEALLOC */
static int fixmaps;
-unsigned long FIXADDR_TOP = 0xfffff000;
+unsigned long FIXADDR_TOP = (-PAGE_SIZE);
EXPORT_SYMBOL(FIXADDR_TOP);
void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
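
A small sketch of what the new PGDIR_ORDER formula (32 + PGD_T_LOG2 - PGDIR_SHIFT)
evaluates to. The concrete values below -- a 4-byte pgd_t (PGD_T_LOG2 = 2),
PGDIR_SHIFT 22 for 4-byte PTEs and 21 for 8-byte PTEs, 4K pages -- are illustrative
assumptions; the point is that the formula reproduces the old hardcoded orders 0 and 1.

	#include <stdio.h>

	static unsigned int pgdir_order(unsigned int pgd_t_log2,
					unsigned int pgdir_shift)
	{
		return 32 + pgd_t_log2 - pgdir_shift;	/* log2 of pgdir size in bytes */
	}

	int main(void)
	{
		const unsigned int page_shift = 12;	/* 4K pages assumed */

		unsigned int o32 = pgdir_order(2, 22);	/* 4-byte PTEs */
		unsigned int o64 = pgdir_order(2, 21);	/* 8-byte PTEs */

		printf("4-byte PTEs: pgdir = %u bytes, page order %u\n",
		       1u << o32, o32 - page_shift);
		printf("8-byte PTEs: pgdir = %u bytes, page order %u\n",
		       1u << o64, o64 - page_shift);
		return 0;
	}
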
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 6aa120813775..45d925360b89 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -95,16 +95,16 @@ unsigned long __init mmu_mapin_ram(void)
break;
}
- setbat(2, KERNELBASE, 0, bl, _PAGE_RAM);
- done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
+ setbat(2, PAGE_OFFSET, 0, bl, _PAGE_RAM);
+ done = (unsigned long)bat_addrs[2].limit - PAGE_OFFSET + 1;
if ((done < tot) && !bat_addrs[3].limit) {
/* use BAT3 to cover a bit more */
tot -= done;
for (bl = 128<<10; bl < max_size; bl <<= 1)
if (bl * 2 > tot)
break;
- setbat(3, KERNELBASE+done, done, bl, _PAGE_RAM);
- done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
+ setbat(3, PAGE_OFFSET+done, done, bl, _PAGE_RAM);
+ done = (unsigned long)bat_addrs[3].limit - PAGE_OFFSET + 1;
}
return done;
@@ -192,7 +192,7 @@ void __init MMU_init_hw(void)
extern unsigned int hash_page[];
extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];
- if (!cpu_has_feature(CPU_FTR_HPTE_TABLE)) {
+ if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
/*
* Put a blr (procedure return) instruction at the
* start of hash_page, since we can still get DSI
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_hash32.c
index f9a47fee3927..65190587a365 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -137,6 +137,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
flush_range(&init_mm, start, end);
FINISH_FLUSH;
}
+EXPORT_SYMBOL(flush_tlb_kernel_range);
/*
* Flush all the (user) entries for the address space described by mm.
@@ -160,6 +161,7 @@ void flush_tlb_mm(struct mm_struct *mm)
flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
FINISH_FLUSH;
}
+EXPORT_SYMBOL(flush_tlb_mm);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
@@ -176,6 +178,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
FINISH_FLUSH;
}
+EXPORT_SYMBOL(flush_tlb_page);
/*
* For each address in the range, find the pte for the address
@@ -188,3 +191,4 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
flush_range(vma->vm_mm, start, end);
FINISH_FLUSH;
}
+EXPORT_SYMBOL(flush_tlb_range);
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_hash64.c
index be7dd422c0fa..c931bc7d1079 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -37,81 +37,6 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
* arch/powerpc/include/asm/tlb.h file -- tgall
*/
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
-static unsigned long pte_freelist_forced_free;
-
-struct pte_freelist_batch
-{
- struct rcu_head rcu;
- unsigned int index;
- pgtable_free_t tables[0];
-};
-
-#define PTE_FREELIST_SIZE \
- ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
- / sizeof(pgtable_free_t))
-
-static void pte_free_smp_sync(void *arg)
-{
- /* Do nothing, just ensure we sync with all CPUs */
-}
-
-/* This is only called when we are critically out of memory
- * (and fail to get a page in pte_free_tlb).
- */
-static void pgtable_free_now(pgtable_free_t pgf)
-{
- pte_freelist_forced_free++;
-
- smp_call_function(pte_free_smp_sync, NULL, 1);
-
- pgtable_free(pgf);
-}
-
-static void pte_free_rcu_callback(struct rcu_head *head)
-{
- struct pte_freelist_batch *batch =
- container_of(head, struct pte_freelist_batch, rcu);
- unsigned int i;
-
- for (i = 0; i < batch->index; i++)
- pgtable_free(batch->tables[i]);
-
- free_page((unsigned long)batch);
-}
-
-static void pte_free_submit(struct pte_freelist_batch *batch)
-{
- INIT_RCU_HEAD(&batch->rcu);
- call_rcu(&batch->rcu, pte_free_rcu_callback);
-}
-
-void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
-{
- /* This is safe since tlb_gather_mmu has disabled preemption */
- cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
- struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
- if (atomic_read(&tlb->mm->mm_users) < 2 ||
- cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
- pgtable_free(pgf);
- return;
- }
-
- if (*batchp == NULL) {
- *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
- if (*batchp == NULL) {
- pgtable_free_now(pgf);
- return;
- }
- (*batchp)->index = 0;
- }
- (*batchp)->tables[(*batchp)->index++] = pgf;
- if ((*batchp)->index == PTE_FREELIST_SIZE) {
- pte_free_submit(*batchp);
- *batchp = NULL;
- }
-}
/*
* A linux PTE was changed and the corresponding hash table entry
@@ -229,17 +154,6 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
batch->index = 0;
}
-void pte_free_finish(void)
-{
- /* This is safe since tlb_gather_mmu has disabled preemption */
- struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
- if (*batchp == NULL)
- return;
- pte_free_submit(*batchp);
- *batchp = NULL;
-}
-
/**
* __flush_hash_table_range - Flush all HPTEs for a given address range
* from the hash table (and the TLB). But keeps
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
new file mode 100644
index 000000000000..39ac22b13c73
--- /dev/null
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -0,0 +1,210 @@
+/*
+ * This file contains the routines for TLB flushing.
+ * On machines where the MMU does not use a hash table to store virtual to
+ * physical translations (i.e., SW loaded TLBs or Book3E compliant processors;
+ * this does -not- include 603, however, which shares the implementation with
+ * hash based processors)
+ *
+ * -- BenH
+ *
+ * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
+ * IBM Corp.
+ *
+ * Derived from arch/ppc/mm/init.c:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/preempt.h>
+#include <linux/spinlock.h>
+
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+#include "mmu_decl.h"
+
+/*
+ * Base TLB flushing operations:
+ *
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes kernel pages
+ *
+ * - local_* variants of page and mm only apply to the current
+ * processor
+ */
+
+/*
+ * These are the base non-SMP variants of page and mm flushing
+ */
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ unsigned int pid;
+
+ preempt_disable();
+ pid = mm->context.id;
+ if (pid != MMU_NO_CONTEXT)
+ _tlbil_pid(pid);
+ preempt_enable();
+}
+EXPORT_SYMBOL(local_flush_tlb_mm);
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+ unsigned int pid;
+
+ preempt_disable();
+ pid = vma ? vma->vm_mm->context.id : 0;
+ if (pid != MMU_NO_CONTEXT)
+ _tlbil_va(vmaddr, pid);
+ preempt_enable();
+}
+EXPORT_SYMBOL(local_flush_tlb_page);
+
+
+/*
+ * And here are the SMP non-local implementations
+ */
+#ifdef CONFIG_SMP
+
+static DEFINE_SPINLOCK(tlbivax_lock);
+
+struct tlb_flush_param {
+ unsigned long addr;
+ unsigned int pid;
+};
+
+static void do_flush_tlb_mm_ipi(void *param)
+{
+ struct tlb_flush_param *p = param;
+
+ _tlbil_pid(p ? p->pid : 0);
+}
+
+static void do_flush_tlb_page_ipi(void *param)
+{
+ struct tlb_flush_param *p = param;
+
+ _tlbil_va(p->addr, p->pid);
+}
+
+
+/* Note on invalidations and PID:
+ *
+ * We snapshot the PID with preempt disabled. At this point, it can still
+ * change either because:
+ * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
+ * - we are invalidating some target that isn't currently running here
+ * and is concurrently acquiring a new PID on another CPU
+ * - some other CPU is re-acquiring a lost PID for this mm
+ * etc...
+ *
+ * However, this shouldn't be a problem as we only guarantee
+ * invalidation of TLB entries present prior to this call, so we
+ * don't care about the PID changing, and invalidating a stale PID
+ * is generally harmless.
+ */
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ cpumask_t cpu_mask;
+ unsigned int pid;
+
+ preempt_disable();
+ pid = mm->context.id;
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ goto no_context;
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+ if (!cpus_empty(cpu_mask)) {
+ struct tlb_flush_param p = { .pid = pid };
+ smp_call_function_mask(cpu_mask, do_flush_tlb_mm_ipi, &p, 1);
+ }
+ _tlbil_pid(pid);
+ no_context:
+ preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_mm);
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+ cpumask_t cpu_mask;
+ unsigned int pid;
+
+ preempt_disable();
+ pid = vma ? vma->vm_mm->context.id : 0;
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ goto bail;
+ cpu_mask = vma->vm_mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+ if (!cpus_empty(cpu_mask)) {
+ /* If broadcast tlbivax is supported, use it */
+ if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
+ int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
+ if (lock)
+ spin_lock(&tlbivax_lock);
+ _tlbivax_bcast(vmaddr, pid);
+ if (lock)
+ spin_unlock(&tlbivax_lock);
+ goto bail;
+ } else {
+ struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };
+ smp_call_function_mask(cpu_mask,
+ do_flush_tlb_page_ipi, &p, 1);
+ }
+ }
+ _tlbil_va(vmaddr, pid);
+ bail:
+ preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_page);
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Flush kernel TLB entries in the given range
+ */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
+ _tlbil_pid(0);
+ preempt_enable();
+#else
+ _tlbil_pid(0);
+#endif
+}
+EXPORT_SYMBOL(flush_tlb_kernel_range);
+
+/*
+ * Currently, for range flushing, we just do a full mm flush. This should
+ * be optimized based on a threshold on the size of the range, since
+ * some implementations can stack multiple tlbivax instructions before a
+ * tlbsync, but for now we keep it this way.
+ */
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+
+{
+ flush_tlb_mm(vma->vm_mm);
+}
+EXPORT_SYMBOL(flush_tlb_range);
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
new file mode 100644
index 000000000000..f900a39e6ec4
--- /dev/null
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -0,0 +1,166 @@
+/*
+ * This file contains low-level functions for performing various
+ * types of TLB invalidations on various processors with no hash
+ * table.
+ *
+ * This file implements the following functions for all no-hash
+ * processors. Some aren't implemented for some variants. Some
+ * are inline in tlbflush.h
+ *
+ * - tlbil_va
+ * - tlbil_pid
+ * - tlbil_all
+ * - tlbivax_bcast (not yet)
+ *
+ * Code mostly moved over from misc_32.S
+ *
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Partially rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * Paul Mackerras, Kumar Gala and Benjamin Herrenschmidt.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/cputable.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+
+#if defined(CONFIG_40x)
+
+/*
+ * 40x implementation needs only tlbil_va
+ */
+_GLOBAL(_tlbil_va)
+ /* We run the search with interrupts disabled because we have to change
+ * the PID and I don't want to preempt when that happens.
+ */
+ mfmsr r5
+ mfspr r6,SPRN_PID
+ wrteei 0
+ mtspr SPRN_PID,r4
+ tlbsx. r3, 0, r3
+ mtspr SPRN_PID,r6
+ wrtee r5
+ bne 1f
+ sync
+ /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is
+ * clear. Since 25 is the V bit in the TLB_TAG, loading this value
+ * will invalidate the TLB entry. */
+ tlbwe r3, r3, TLB_TAG
+ isync
+1: blr
+
+#elif defined(CONFIG_8xx)
+
+/*
+ * Nothing to do for 8xx, everything is inline
+ */
+
+#elif defined(CONFIG_44x)
+
+/*
+ * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
+ * of the TLB for everything else.
+ */
+_GLOBAL(_tlbil_va)
+ mfspr r5,SPRN_MMUCR
+ rlwimi r5,r4,0,24,31 /* Set TID */
+
+ /* We have to run the search with interrupts disabled, otherwise
+ * an interrupt which causes a TLB miss can clobber the MMUCR
+ * between the mtspr and the tlbsx.
+ *
+ * Critical and Machine Check interrupts take care of saving
+ * and restoring MMUCR, so only normal interrupts have to be
+ * taken care of.
+ */
+ mfmsr r4
+ wrteei 0
+ mtspr SPRN_MMUCR,r5
+ tlbsx. r3, 0, r3
+ wrtee r4
+ bne 1f
+ sync
+ /* There are only 64 TLB entries, so r3 < 64,
+ * which means bit 22 is clear. Since 22 is
+ * the V bit in the TLB_PAGEID, loading this
+ * value will invalidate the TLB entry.
+ */
+ tlbwe r3, r3, PPC44x_TLB_PAGEID
+ isync
+1: blr
+
+_GLOBAL(_tlbil_all)
+_GLOBAL(_tlbil_pid)
+ li r3,0
+ sync
+
+ /* Load high watermark */
+ lis r4,tlb_44x_hwater@ha
+ lwz r5,tlb_44x_hwater@l(r4)
+
+1: tlbwe r3,r3,PPC44x_TLB_PAGEID
+ addi r3,r3,1
+ cmpw 0,r3,r5
+ ble 1b
+
+ isync
+ blr
+
+#elif defined(CONFIG_FSL_BOOKE)
+/*
+ * FSL BookE implementations. Currently _pid and _all are the
+ * same. This will change when tlbilx is actually supported and
+ * performs invalidate-by-PID. This change will be driven by
+ * an mmu_features conditional.
+ */
+
+/*
+ * Flush MMU TLB on the local processor
+ */
+_GLOBAL(_tlbil_pid)
+_GLOBAL(_tlbil_all)
+#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+ MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
+ li r3,(MMUCSR0_TLBFI)@l
+ mtspr SPRN_MMUCSR0, r3
+1:
+ mfspr r3,SPRN_MMUCSR0
+ andi. r3,r3,MMUCSR0_TLBFI@l
+ bne 1b
+ msync
+ isync
+ blr
+
+/*
+ * Flush MMU TLB for a particular address, but only on the local processor
+ * (no broadcast)
+ */
+_GLOBAL(_tlbil_va)
+ mfmsr r10
+ wrteei 0
+ slwi r4,r4,16
+ mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
+ tlbsx 0,r3
+ mfspr r4,SPRN_MAS1 /* check valid */
+ andis. r3,r4,MAS1_VALID@h
+ beq 1f
+ rlwinm r4,r4,0,1,31
+ mtspr SPRN_MAS1,r4
+ tlbwe
+ msync
+ isync
+1: wrtee r10
+ blr
+#else
+#error Unsupported processor type!
+#endif
diff --git a/arch/powerpc/oprofile/cell/pr_util.h b/arch/powerpc/oprofile/cell/pr_util.h
index 628009c01958..dfdbffa06818 100644
--- a/arch/powerpc/oprofile/cell/pr_util.h
+++ b/arch/powerpc/oprofile/cell/pr_util.h
@@ -79,7 +79,7 @@ struct spu_buffer {
* the vma-to-fileoffset map.
*/
struct vma_to_fileoffset_map *create_vma_map(const struct spu *spu,
- u64 objectid);
+ unsigned long objectid);
unsigned int vma_map_lookup(struct vma_to_fileoffset_map *map,
unsigned int vma, const struct spu *aSpu,
int *grd_val);
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index dd499c3e9da7..83faa958b9d4 100644
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -49,7 +49,7 @@ void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_rese
* of precision. This is close enough for the purpose at hand.
*
* The value of the timeout should be small enough that the hw
- * trace buffer will not get more then about 1/3 full for the
+ * trace buffer will not get more than about 1/3 full for the
* maximum user specified (the LFSR value) hw sampling frequency.
* This is to ensure the trace buffer will never fill even if the
* kernel thread scheduling varies under a heavy system load.
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
index 2949126d28d1..6b793aeda72e 100644
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ b/arch/powerpc/oprofile/cell/spu_task_sync.c
@@ -297,7 +297,7 @@ static inline unsigned long fast_get_dcookie(struct path *path)
{
unsigned long cookie;
- if (path->dentry->d_cookie)
+ if (path->dentry->d_flags & DCACHE_COOKIE)
return (unsigned long)path->dentry;
get_dcookie(path, &cookie);
return cookie;
diff --git a/arch/powerpc/platforms/40x/ep405.c b/arch/powerpc/platforms/40x/ep405.c
index ae2e7f67c18e..4058fd1e7fc7 100644
--- a/arch/powerpc/platforms/40x/ep405.c
+++ b/arch/powerpc/platforms/40x/ep405.c
@@ -100,7 +100,7 @@ static void __init ep405_setup_arch(void)
/* Find & init the BCSR CPLD */
ep405_init_bcsr();
- ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
+ ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
}
static int __init ep405_probe(void)
diff --git a/arch/powerpc/platforms/40x/kilauea.c b/arch/powerpc/platforms/40x/kilauea.c
index 1dd24ffc0dc1..fd7d934dac8b 100644
--- a/arch/powerpc/platforms/40x/kilauea.c
+++ b/arch/powerpc/platforms/40x/kilauea.c
@@ -44,7 +44,7 @@ static int __init kilauea_probe(void)
if (!of_flat_dt_is_compatible(root, "amcc,kilauea"))
return 0;
- ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
+ ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
return 1;
}
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
index 4498a86b46c3..f40ac9b8f99f 100644
--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -61,7 +61,7 @@ static int __init ppc40x_probe(void)
for (i = 0; i < ARRAY_SIZE(board); i++) {
if (of_flat_dt_is_compatible(root, board[i])) {
- ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
+ ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
return 1;
}
}
diff --git a/arch/powerpc/platforms/44x/ebony.c b/arch/powerpc/platforms/44x/ebony.c
index a0e8fe4662f6..88b9117fa691 100644
--- a/arch/powerpc/platforms/44x/ebony.c
+++ b/arch/powerpc/platforms/44x/ebony.c
@@ -54,7 +54,7 @@ static int __init ebony_probe(void)
if (!of_flat_dt_is_compatible(root, "ibm,ebony"))
return 0;
- ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
+ ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
return 1;
}
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c
index 29671262801f..76fdc51dac8b 100644
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -69,7 +69,7 @@ static int __init ppc44x_probe(void)
for (i = 0; i < ARRAY_SIZE(board); i++) {
if (of_flat_dt_is_compatible(root, board[i])) {
- ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
+ ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
return 1;
}
}
diff --git a/arch/powerpc/platforms/44x/sam440ep.c b/arch/powerpc/platforms/44x/sam440ep.c
index 47f10e647735..a78e8eb6da41 100644
--- a/arch/powerpc/platforms/44x/sam440ep.c
+++ b/arch/powerpc/platforms/44x/sam440ep.c
@@ -51,7 +51,7 @@ static int __init sam440ep_probe(void)
if (!of_flat_dt_is_compatible(root, "acube,sam440ep"))
return 0;
- ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
+ ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
return 1;
}
diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
index fe92e65103ed..b5c753db125e 100644
--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
@@ -3,7 +3,6 @@
#include <asm/io.h>
#include <asm/time.h>
#include <asm/mpc52xx.h>
-#include "mpc52xx_pic.h"
/* defined in lite5200_sleep.S and only used here */
extern void lite5200_low_power(void __iomem *sram, void __iomem *mbar);
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index ae7c34f37e1c..98367a0255f3 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -42,7 +42,7 @@ static struct of_device_id mpc52xx_bus_ids[] __initdata = {
* from interrupt context while node mapping (which calls ioremap())
* cannot be used at such point.
*/
-static spinlock_t mpc52xx_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(mpc52xx_lock);
static struct mpc52xx_gpt __iomem *mpc52xx_wdt;
static struct mpc52xx_cdm __iomem *mpc52xx_cdm;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
index b49a18527661..c3f2c21024e3 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
@@ -375,7 +375,7 @@ mpc52xx_add_bridge(struct device_node *node)
pr_debug("Adding MPC52xx PCI host bridge %s\n", node->full_name);
- ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
if (of_address_to_resource(node, 0, &rsrc) != 0) {
printk(KERN_ERR "Can't get %s resources\n", node->full_name);
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 8479394e9ab4..72865e8e4b51 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -2,20 +2,100 @@
*
* Programmable Interrupt Controller functions for the Freescale MPC52xx.
*
+ * Copyright (C) 2008 Secret Lab Technologies Ltd.
* Copyright (C) 2006 bplan GmbH
+ * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 Montavista Software, Inc
*
* Based on the code from the 2.4 kernel by
* Dale Farnsworth <dfarnsworth@mvista.com> and Kent Borg.
*
- * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003 Montavista Software, Inc
- *
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*
*/
+/*
+ * This is the device driver for the MPC5200 interrupt controller.
+ *
+ * hardware overview
+ * -----------------
+ * The MPC5200 interrupt controller groups all interrupt sources into
+ * three groups called 'critical', 'main', and 'peripheral'. The critical
+ * group has 3 irqs: External IRQ0, slice timer 0 irq, and wake from deep
+ * sleep. The main group includes the other 3 external IRQs, slice timer 1,
+ * RTC, gpios, and the general purpose timers. The peripheral group contains the
+ * remaining irq sources from all of the on-chip peripherals (PSCs, Ethernet,
+ * USB, DMA, etc).
+ *
+ * virqs
+ * -----
+ * The Linux IRQ subsystem requires that each irq source be assigned a
+ * system wide unique IRQ number starting at 1 (0 means no irq). Since
+ * systems can have multiple interrupt controllers, the virtual IRQ (virq)
+ * infrastructure lets each interrupt controller define a local set
+ * of IRQ numbers and the virq infrastructure maps those numbers into
+ * a unique range of the global IRQ# space.
+ *
+ * To define a range of virq numbers for this controller, this driver first
+ * assigns a number to each of the irq groups (called the level 1 or L1
+ * value). Within each group, individual irq sources are also assigned a
+ * number, as defined by the MPC5200 user guide, referred to as the
+ * level 2 or L2 value. The virq number is determined by shifting up the
+ * L1 value by MPC52xx_IRQ_L1_OFFSET and ORing it with the L2 value.
+ *
+ * For example, the TMR0 interrupt is irq 9 in the main group. The
+ * virq for TMR0 is calculated by ((1 << MPC52xx_IRQ_L1_OFFSET) | 9).
+ *
+ * The observant reader will also notice that this driver defines a 4th
+ * interrupt group called 'bestcomm'. The bestcomm group isn't physically
+ * part of the MPC5200 interrupt controller, but it is used here to assign
+ * a separate virq number for each bestcomm task (since any of the 16
+ * bestcomm tasks can cause the bestcomm interrupt to be raised). When a
+ * bestcomm interrupt occurs (peripheral group, irq 0) this driver determines
+ * which task needs servicing and returns the irq number for that task. This
+ * allows drivers which use bestcomm to define their own interrupt handlers.
+ *
+ * irq_chip structures
+ * -------------------
+ * For actually manipulating IRQs (masking, enabling, clearing, etc) this
+ * driver defines four separate 'irq_chip' structures, one for the main
+ * group, one for the peripherals group, one for the bestcomm group and one
+ * for external interrupts. The irq_chip structures provide the hooks needed
+ * to manipulate each IRQ source, and since each group has a separate set
+ * of registers for controlling the irq, it makes sense to divide up the
+ * hooks along those lines.
+ *
+ * You'll notice that there is not an irq_chip for the critical group and
+ * you'll also notice that there is an irq_chip defined for external
+ * interrupts even though there is no external interrupt group. The reason
+ * for this is that the four external interrupts are all managed with the same
+ * register even though one of the external IRQs is in the critical group and
+ * the other three are in the main group. For this reason it makes sense for
+ * the 4 external irqs to be managed using a separate set of hooks. The
+ * reason there is no crit irq_chip is that of the 3 irqs in the critical
+ * group, only the external interrupt is actually supported at this time by
+ * this driver, and since it is the only one used, it can just be directed
+ * to make use of the external irq irq_chip.
+ *
+ * device tree bindings
+ * --------------------
+ * The device tree bindings for this controller reflect the two level
+ * organization of irqs in the device. #interrupt-cells = <3> where the
+ * first cell is the group number [0..3], the second cell is the irq
+ * number in the group, and the third cell is the sense type (level/edge).
+ * For reference, the following is a list of the interrupt property values
+ * associated with external interrupt sources on the MPC5200 (the correct
+ * interrupts property is non-obvious to determine from the mpc5200 manual
+ * and is a frequently asked question).
+ *
+ * External interrupts:
+ * <0 0 n> external irq0, n is sense (n=0: level high,
+ * <1 1 n> external irq1, n is sense n=1: edge rising,
+ * <1 2 n> external irq2, n is sense n=2: edge falling,
+ * <1 3 n> external irq3, n is sense n=3: level low)
+ */
#undef DEBUG
#include <linux/interrupt.h>
@@ -24,11 +104,19 @@
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/mpc52xx.h>
-#include "mpc52xx_pic.h"
-/*
- *
-*/
+/* HW IRQ mapping */
+#define MPC52xx_IRQ_L1_CRIT (0)
+#define MPC52xx_IRQ_L1_MAIN (1)
+#define MPC52xx_IRQ_L1_PERP (2)
+#define MPC52xx_IRQ_L1_SDMA (3)
+
+#define MPC52xx_IRQ_L1_OFFSET (6)
+#define MPC52xx_IRQ_L1_MASK (0x00c0)
+#define MPC52xx_IRQ_L2_MASK (0x003f)
+
+#define MPC52xx_IRQ_HIGHTESTHWIRQ (0xd0)
+
/* MPC5200 device tree match tables */
static struct of_device_id mpc52xx_pic_ids[] __initdata = {
@@ -53,10 +141,7 @@ static unsigned char mpc52xx_map_senses[4] = {
IRQ_TYPE_LEVEL_LOW,
};
-/*
- *
-*/
-
+/* Utility functions */
static inline void io_be_setbit(u32 __iomem *addr, int bitno)
{
out_be32(addr, in_be32(addr) | (1 << bitno));
@@ -69,15 +154,14 @@ static inline void io_be_clrbit(u32 __iomem *addr, int bitno)
/*
* IRQ[0-3] interrupt irq_chip
-*/
-
+ */
static void mpc52xx_extirq_mask(unsigned int virq)
{
int irq;
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -90,7 +174,7 @@ static void mpc52xx_extirq_unmask(unsigned int virq)
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -103,7 +187,7 @@ static void mpc52xx_extirq_ack(unsigned int virq)
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -117,7 +201,7 @@ static int mpc52xx_extirq_set_type(unsigned int virq, unsigned int flow_type)
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__, irq, l2irq, flow_type);
@@ -156,15 +240,14 @@ static struct irq_chip mpc52xx_extirq_irqchip = {
/*
* Main interrupt irq_chip
-*/
-
+ */
static void mpc52xx_main_mask(unsigned int virq)
{
int irq;
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -177,7 +260,7 @@ static void mpc52xx_main_unmask(unsigned int virq)
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -193,15 +276,14 @@ static struct irq_chip mpc52xx_main_irqchip = {
/*
* Peripherals interrupt irq_chip
-*/
-
+ */
static void mpc52xx_periph_mask(unsigned int virq)
{
int irq;
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -214,7 +296,7 @@ static void mpc52xx_periph_unmask(unsigned int virq)
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -230,15 +312,14 @@ static struct irq_chip mpc52xx_periph_irqchip = {
/*
* SDMA interrupt irq_chip
-*/
-
+ */
static void mpc52xx_sdma_mask(unsigned int virq)
{
int irq;
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -251,7 +332,7 @@ static void mpc52xx_sdma_unmask(unsigned int virq)
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -264,7 +345,7 @@ static void mpc52xx_sdma_ack(unsigned int virq)
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -278,13 +359,12 @@ static struct irq_chip mpc52xx_sdma_irqchip = {
.ack = mpc52xx_sdma_ack,
};
-/*
- * irq_host
-*/
-
+/**
+ * mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property
+ */
static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
- u32 * intspec, unsigned int intsize,
- irq_hw_number_t * out_hwirq,
+ u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
{
int intrvect_l1;
@@ -299,10 +379,9 @@ static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
intrvect_l2 = (int)intspec[1];
intrvect_type = (int)intspec[2];
- intrvect_linux =
- (intrvect_l1 << MPC52xx_IRQ_L1_OFFSET) & MPC52xx_IRQ_L1_MASK;
- intrvect_linux |=
- (intrvect_l2 << MPC52xx_IRQ_L2_OFFSET) & MPC52xx_IRQ_L2_MASK;
+ intrvect_linux = (intrvect_l1 << MPC52xx_IRQ_L1_OFFSET) &
+ MPC52xx_IRQ_L1_MASK;
+ intrvect_linux |= intrvect_l2 & MPC52xx_IRQ_L2_MASK;
pr_debug("return %x, l1=%d, l2=%d\n", intrvect_linux, intrvect_l1,
intrvect_l2);
@@ -313,11 +392,11 @@ static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-/*
- * this function retrieves the correct IRQ type out
- * of the MPC regs
- * Only externals IRQs needs this
-*/
+/**
+ * mpc52xx_irqx_gettype - determine the IRQ sense type (level/edge)
+ *
+ * Only external IRQs need this.
+ */
static int mpc52xx_irqx_gettype(int irq)
{
int type;
@@ -329,6 +408,9 @@ static int mpc52xx_irqx_gettype(int irq)
return mpc52xx_map_senses[type];
}
+/**
+ * mpc52xx_irqhost_map - Hook to map from virq to an irq_chip structure
+ */
static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t irq)
{
@@ -339,7 +421,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
int type;
l1irq = (irq & MPC52xx_IRQ_L1_MASK) >> MPC52xx_IRQ_L1_OFFSET;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
/*
* Most of ours IRQs will be level low
@@ -379,8 +461,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
break;
default:
- pr_debug("%s: Error, unknown L1 IRQ (0x%x)\n", __func__, l1irq);
- printk(KERN_ERR "Unknow IRQ!\n");
+ pr_err("%s: invalid virq requested (0x%x)\n", __func__, virq);
return -EINVAL;
}
@@ -406,10 +487,15 @@ static struct irq_host_ops mpc52xx_irqhost_ops = {
.map = mpc52xx_irqhost_map,
};
-/*
- * init (public)
-*/
-
+/**
+ * mpc52xx_init_irq - Initialize and register with the virq subsystem
+ *
+ * Hook for setting up IRQs on an mpc5200 system. A pointer to this function
+ * is to be put into the machine definition structure.
+ *
+ * This function searches the device tree for an MPC5200 interrupt controller,
+ * initializes it, and registers it with the virq subsystem.
+ */
void __init mpc52xx_init_irq(void)
{
u32 intr_ctrl;
@@ -454,7 +540,6 @@ void __init mpc52xx_init_irq(void)
* As last step, add an irq host to translate the real
* hw irq information provided by the ofw to linux virq
*/
-
mpc52xx_irqhost = irq_alloc_host(picnode, IRQ_HOST_MAP_LINEAR,
MPC52xx_IRQ_HIGHTESTHWIRQ,
&mpc52xx_irqhost_ops, -1);
@@ -462,12 +547,38 @@ void __init mpc52xx_init_irq(void)
if (!mpc52xx_irqhost)
panic(__FILE__ ": Cannot allocate the IRQ host\n");
- printk(KERN_INFO "MPC52xx PIC is up and running!\n");
+ irq_set_default_host(mpc52xx_irqhost);
+
+ pr_info("MPC52xx PIC is up and running!\n");
}
-/*
- * get_irq (public)
-*/
+/**
+ * mpc52xx_get_irq - Get pending interrupt number hook function
+ *
+ * Called by the interrupt handler to determine what IRQ handler needs to be
+ * executed.
+ *
+ * Status of pending interrupts is determined by reading the encoded status
+ * register. The encoded status register has three fields, one for each of the
+ * types of interrupts defined by the controller: 'critical', 'main' and
+ * 'peripheral'. This function reads the status register and returns the IRQ
+ * number associated with the highest priority pending interrupt. 'Critical'
+ * interrupts have the highest priority, followed by 'main' interrupts, and
+ * then 'peripheral'.
+ *
+ * The mpc5200 interrupt controller can be configured to boost the priority
+ * of individual 'peripheral' interrupts. If this is the case then a special
+ * value will appear in either the crit or main fields indicating a high
+ * or medium priority peripheral irq has occurred.
+ *
+ * This function checks each of the 3 irq request fields and returns the
+ * first pending interrupt that it finds.
+ *
+ * This function also identifies a 4th type of interrupt: 'bestcomm'. Each
+ * bestcomm DMA task can raise the bestcomm peripheral interrupt. When this
+ * occurs, a task-specific IRQ# is decoded so that each task can have its
+ * own IRQ handler.
+ */
unsigned int mpc52xx_get_irq(void)
{
u32 status;
@@ -478,25 +589,21 @@ unsigned int mpc52xx_get_irq(void)
irq = (status >> 8) & 0x3;
if (irq == 2) /* high priority peripheral */
goto peripheral;
- irq |= (MPC52xx_IRQ_L1_CRIT << MPC52xx_IRQ_L1_OFFSET) &
- MPC52xx_IRQ_L1_MASK;
+ irq |= (MPC52xx_IRQ_L1_CRIT << MPC52xx_IRQ_L1_OFFSET);
} else if (status & 0x00200000) { /* main */
irq = (status >> 16) & 0x1f;
if (irq == 4) /* low priority peripheral */
goto peripheral;
- irq |= (MPC52xx_IRQ_L1_MAIN << MPC52xx_IRQ_L1_OFFSET) &
- MPC52xx_IRQ_L1_MASK;
+ irq |= (MPC52xx_IRQ_L1_MAIN << MPC52xx_IRQ_L1_OFFSET);
} else if (status & 0x20000000) { /* peripheral */
peripheral:
irq = (status >> 24) & 0x1f;
if (irq == 0) { /* bestcomm */
status = in_be32(&sdma->IntPend);
irq = ffs(status) - 1;
- irq |= (MPC52xx_IRQ_L1_SDMA << MPC52xx_IRQ_L1_OFFSET) &
- MPC52xx_IRQ_L1_MASK;
+ irq |= (MPC52xx_IRQ_L1_SDMA << MPC52xx_IRQ_L1_OFFSET);
} else {
- irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET) &
- MPC52xx_IRQ_L1_MASK;
+ irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET);
}
}
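
A minimal sketch of the two-level irq encoding described in the driver comment
above: the hardware irq number is the L1 group shifted up by MPC52xx_IRQ_L1_OFFSET
ORed with the L2 number, and is decoded again with the L1/L2 masks. The TMR0 case
(main group, irq 9) follows the example in the comment; the constants are copied
from the patch.

	#include <stdio.h>

	#define IRQ_L1_OFFSET	6
	#define IRQ_L1_MASK	0x00c0
	#define IRQ_L2_MASK	0x003f

	#define IRQ_L1_MAIN	1

	int main(void)
	{
		/* encode: TMR0 is irq 9 in the main group */
		unsigned int hw = (IRQ_L1_MAIN << IRQ_L1_OFFSET) | 9;

		/* decode, as mpc52xx_irqhost_map() does */
		unsigned int l1 = (hw & IRQ_L1_MASK) >> IRQ_L1_OFFSET;
		unsigned int l2 = hw & IRQ_L2_MASK;

		printf("hwirq=%#x -> group %u, irq %u\n", hw, l1, l2);
		return 0;
	}
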
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.h b/arch/powerpc/platforms/52xx/mpc52xx_pic.h
deleted file mode 100644
index 1a26bcdb3049..000000000000
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Header file for Freescale MPC52xx Interrupt controller
- *
- * Copyright (C) 2004-2005 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003 MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __POWERPC_SYSDEV_MPC52xx_PIC_H__
-#define __POWERPC_SYSDEV_MPC52xx_PIC_H__
-
-#include <asm/types.h>
-
-
-/* HW IRQ mapping */
-#define MPC52xx_IRQ_L1_CRIT (0)
-#define MPC52xx_IRQ_L1_MAIN (1)
-#define MPC52xx_IRQ_L1_PERP (2)
-#define MPC52xx_IRQ_L1_SDMA (3)
-
-#define MPC52xx_IRQ_L1_OFFSET (6)
-#define MPC52xx_IRQ_L1_MASK (0x00c0)
-
-#define MPC52xx_IRQ_L2_OFFSET (0)
-#define MPC52xx_IRQ_L2_MASK (0x003f)
-
-#define MPC52xx_IRQ_HIGHTESTHWIRQ (0xd0)
-
-
-/* Interrupt controller Register set */
-struct mpc52xx_intr {
- u32 per_mask; /* INTR + 0x00 */
- u32 per_pri1; /* INTR + 0x04 */
- u32 per_pri2; /* INTR + 0x08 */
- u32 per_pri3; /* INTR + 0x0c */
- u32 ctrl; /* INTR + 0x10 */
- u32 main_mask; /* INTR + 0x14 */
- u32 main_pri1; /* INTR + 0x18 */
- u32 main_pri2; /* INTR + 0x1c */
- u32 reserved1; /* INTR + 0x20 */
- u32 enc_status; /* INTR + 0x24 */
- u32 crit_status; /* INTR + 0x28 */
- u32 main_status; /* INTR + 0x2c */
- u32 per_status; /* INTR + 0x30 */
- u32 reserved2; /* INTR + 0x34 */
- u32 per_error; /* INTR + 0x38 */
-};
-
-#endif /* __POWERPC_SYSDEV_MPC52xx_PIC_H__ */
-
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
index c72d3304387f..a55b0b6813ed 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
@@ -5,9 +5,6 @@
#include <asm/cacheflush.h>
#include <asm/mpc52xx.h>
-#include "mpc52xx_pic.h"
-
-
/* these are defined in mpc52xx_sleep.S, and only used here */
extern void mpc52xx_deep_sleep(void __iomem *sram, void __iomem *sdram_regs,
struct mpc52xx_cdm __iomem *, struct mpc52xx_intr __iomem*);
diff --git a/arch/powerpc/platforms/82xx/pq2.c b/arch/powerpc/platforms/82xx/pq2.c
index 1b75902fad64..9761a59f175f 100644
--- a/arch/powerpc/platforms/82xx/pq2.c
+++ b/arch/powerpc/platforms/82xx/pq2.c
@@ -53,7 +53,7 @@ static void __init pq2_pci_add_bridge(struct device_node *np)
if (of_address_to_resource(np, 0, &r) || r.end - r.start < 0x10b)
goto err;
- ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
hose = pcibios_alloc_controller(np);
if (!hose)
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index a428f8d1ac80..5177bdd2c62a 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -42,7 +42,7 @@ static void __init mpc831x_rdb_setup_arch(void)
mpc831x_usb_cfg();
}
-void __init mpc831x_rdb_init_IRQ(void)
+static void __init mpc831x_rdb_init_IRQ(void)
{
struct device_node *np;
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index ec43477caa63..ec0b401bc9cf 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -49,8 +49,6 @@
#define DBG(fmt...)
#endif
-static u8 *bcsr_regs = NULL;
-
/* ************************************************************************
*
* Setup the architecture
@@ -59,13 +57,14 @@ static u8 *bcsr_regs = NULL;
static void __init mpc832x_sys_setup_arch(void)
{
struct device_node *np;
+ u8 __iomem *bcsr_regs = NULL;
if (ppc_md.progress)
ppc_md.progress("mpc832x_sys_setup_arch()", 0);
/* Map BCSR area */
np = of_find_node_by_name(NULL, "bcsr");
- if (np != 0) {
+ if (np) {
struct resource res;
of_address_to_resource(np, 0, &res);
@@ -93,9 +92,9 @@ static void __init mpc832x_sys_setup_arch(void)
!= NULL){
/* Reset the Ethernet PHYs */
#define BCSR8_FETH_RST 0x50
- bcsr_regs[8] &= ~BCSR8_FETH_RST;
+ clrbits8(&bcsr_regs[8], BCSR8_FETH_RST);
udelay(1000);
- bcsr_regs[8] |= BCSR8_FETH_RST;
+ setbits8(&bcsr_regs[8], BCSR8_FETH_RST);
iounmap(bcsr_regs);
of_node_put(np);
}
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 0300268ce5b8..2a1295f19832 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -38,6 +38,7 @@
#define DBG(fmt...)
#endif
+#ifdef CONFIG_QUICC_ENGINE
static void mpc83xx_spi_activate_cs(u8 cs, u8 polarity)
{
pr_debug("%s %d %d\n", __func__, cs, polarity);
@@ -77,8 +78,8 @@ static int __init mpc832x_spi_init(void)
mpc83xx_spi_activate_cs,
mpc83xx_spi_deactivate_cs);
}
-
machine_device_initcall(mpc832x_rdb, mpc832x_spi_init);
+#endif /* CONFIG_QUICC_ENGINE */
/* ************************************************************************
*
@@ -130,7 +131,7 @@ static int __init mpc832x_declare_of_platform_devices(void)
}
machine_device_initcall(mpc832x_rdb, mpc832x_declare_of_platform_devices);
-void __init mpc832x_rdb_init_IRQ(void)
+static void __init mpc832x_rdb_init_IRQ(void)
{
struct device_node *np;
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index 9d46e5bdd101..09e9d6fb7411 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -18,6 +18,7 @@
#include <linux/stddef.h>
#include <linux/kernel.h>
+#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
@@ -43,6 +44,7 @@
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
+#include <sysdev/simple_gpio.h>
#include <asm/qe.h>
#include <asm/qe_ic.h>
@@ -55,8 +57,6 @@
#define DBG(fmt...)
#endif
-static u8 *bcsr_regs = NULL;
-
/* ************************************************************************
*
* Setup the architecture
@@ -65,13 +65,14 @@ static u8 *bcsr_regs = NULL;
static void __init mpc836x_mds_setup_arch(void)
{
struct device_node *np;
+ u8 __iomem *bcsr_regs = NULL;
if (ppc_md.progress)
ppc_md.progress("mpc836x_mds_setup_arch()", 0);
/* Map BCSR area */
np = of_find_node_by_name(NULL, "bcsr");
- if (np != 0) {
+ if (np) {
struct resource res;
of_address_to_resource(np, 0, &res);
@@ -93,6 +94,16 @@ static void __init mpc836x_mds_setup_arch(void)
for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;)
par_io_of_config(np);
+#ifdef CONFIG_QE_USB
+ /* Must fixup Par IO before QE GPIO chips are registered. */
+ par_io_config_pin(1, 2, 1, 0, 3, 0); /* USBOE */
+ par_io_config_pin(1, 3, 1, 0, 3, 0); /* USBTP */
+ par_io_config_pin(1, 8, 1, 0, 1, 0); /* USBTN */
+ par_io_config_pin(1, 10, 2, 0, 3, 0); /* USBRXD */
+ par_io_config_pin(1, 9, 2, 1, 3, 0); /* USBRP */
+ par_io_config_pin(1, 11, 2, 1, 3, 0); /* USBRN */
+ par_io_config_pin(2, 20, 2, 0, 1, 0); /* CLK21 */
+#endif /* CONFIG_QE_USB */
}
if ((np = of_find_compatible_node(NULL, "network", "ucc_geth"))
@@ -151,6 +162,70 @@ static int __init mpc836x_declare_of_platform_devices(void)
}
machine_device_initcall(mpc836x_mds, mpc836x_declare_of_platform_devices);
+#ifdef CONFIG_QE_USB
+static int __init mpc836x_usb_cfg(void)
+{
+ u8 __iomem *bcsr;
+ struct device_node *np;
+ const char *mode;
+ int ret = 0;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,mpc8360mds-bcsr");
+ if (!np)
+ return -ENODEV;
+
+ bcsr = of_iomap(np, 0);
+ of_node_put(np);
+ if (!bcsr)
+ return -ENOMEM;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,mpc8323-qe-usb");
+ if (!np) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+#define BCSR8_TSEC1M_MASK (0x3 << 6)
+#define BCSR8_TSEC1M_RGMII (0x0 << 6)
+#define BCSR8_TSEC2M_MASK (0x3 << 4)
+#define BCSR8_TSEC2M_RGMII (0x0 << 4)
+ /*
+ * Default is GMII (2), but we should set it to RGMII (0) if we use
+ * USB (Eth PHY is in RGMII mode anyway).
+ */
+ clrsetbits_8(&bcsr[8], BCSR8_TSEC1M_MASK | BCSR8_TSEC2M_MASK,
+ BCSR8_TSEC1M_RGMII | BCSR8_TSEC2M_RGMII);
+
+#define BCSR13_USBMASK 0x0f
+#define BCSR13_nUSBEN 0x08 /* 1 - Disable, 0 - Enable */
+#define BCSR13_USBSPEED 0x04 /* 1 - Full, 0 - Low */
+#define BCSR13_USBMODE 0x02 /* 1 - Host, 0 - Function */
+#define BCSR13_nUSBVCC 0x01 /* 1 - gets VBUS, 0 - supplies VBUS */
+
+ clrsetbits_8(&bcsr[13], BCSR13_USBMASK, BCSR13_USBSPEED);
+
+ mode = of_get_property(np, "mode", NULL);
+ if (mode && !strcmp(mode, "peripheral")) {
+ setbits8(&bcsr[13], BCSR13_nUSBVCC);
+ qe_usb_clock_set(QE_CLK21, 48000000);
+ } else {
+ setbits8(&bcsr[13], BCSR13_USBMODE);
+ /*
+ * The BCSR GPIOs are used to control power and
+ * speed of the USB transceiver. This is needed for
+ * the USB Host only.
+ */
+ simple_gpiochip_init("fsl,mpc8360mds-bcsr-gpio");
+ }
+
+ of_node_put(np);
+err:
+ iounmap(bcsr);
+ return ret;
+}
+machine_arch_initcall(mpc836x_mds, mpc836x_usb_cfg);
+#endif /* CONFIG_QE_USB */
+
static void __init mpc836x_mds_init_IRQ(void)
{
struct device_node *np;
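/*
 * clrsetbits_8() as used in mpc836x_usb_cfg() above combines the clear
 * and set steps into one register access.  A minimal sketch, assuming
 * the usual asm/io.h semantics:
 */
static inline void clrsetbits_8(volatile u8 __iomem *addr, u8 clear, u8 set)
{
	out_8(addr, (in_8(addr) & ~clear) | set);	/* clear the field, then set new bits */
}
/*
 * With the defines above, clrsetbits_8(&bcsr[13], BCSR13_USBMASK,
 * BCSR13_USBSPEED) zeroes the whole 4-bit USB field and leaves only the
 * full-speed bit set: nUSBEN=0 (enabled), USBSPEED=1 (full speed),
 * USBMODE=0, nUSBVCC=0.  The mode and VCC bits are then adjusted
 * depending on the device tree "mode" property.
 */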
diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
index a5273bb28e1b..b0090aac9642 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
@@ -51,8 +51,9 @@ static void __init mpc836x_rdk_setup_arch(void)
for_each_compatible_node(np, "pci", "fsl,mpc8349-pci")
mpc83xx_add_bridge(np);
#endif
-
+#ifdef CONFIG_QUICC_ENGINE
qe_reset();
+#endif
}
static void __init mpc836x_rdk_init_IRQ(void)
@@ -71,13 +72,14 @@ static void __init mpc836x_rdk_init_IRQ(void)
*/
ipic_set_default_priority();
of_node_put(np);
-
+#ifdef CONFIG_QUICC_ENGINE
np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
if (!np)
return;
qe_ic_init(np, 0, qe_ic_cascade_low_ipic, qe_ic_cascade_high_ipic);
of_node_put(np);
+#endif
}
/*
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index 8bb13c807142..530ef990ca7c 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -26,7 +26,6 @@
#define BCSR12_USB_SER_MASK 0x8a
#define BCSR12_USB_SER_PIN 0x80
#define BCSR12_USB_SER_DEVICE 0x02
-extern int mpc837x_usb_cfg(void);
static int mpc837xmds_usb_cfg(void)
{
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index da030afa2e2c..1d096545322b 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -21,8 +21,6 @@
#include "mpc83xx.h"
-extern int mpc837x_usb_cfg(void);
-
/* ************************************************************************
*
* Setup the architecture
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 2a7cbabb410a..83cfe51526ec 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -61,6 +61,7 @@
extern void mpc83xx_restart(char *cmd);
extern long mpc83xx_time_init(void);
+extern int mpc837x_usb_cfg(void);
extern int mpc834x_usb_cfg(void);
extern int mpc831x_usb_cfg(void);
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index cb3054e1001d..f0798c09980f 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -1,6 +1,8 @@
#
# Makefile for the PowerPC 85xx linux kernel.
#
+obj-$(CONFIG_SMP) += smp.o
+
obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads.o
obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads.o
obj-$(CONFIG_MPC85xx_CDS) += mpc85xx_cds.o
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 613bf8c2e30d..7326d904202c 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -63,6 +63,7 @@ void __init mpc85xx_ds_pic_init(void)
struct device_node *cascade_node = NULL;
int cascade_irq;
#endif
+ unsigned long root = of_get_flat_dt_root();
np = of_find_node_by_type(NULL, "open-pic");
if (np == NULL) {
@@ -76,11 +77,19 @@ void __init mpc85xx_ds_pic_init(void)
return;
}
- mpic = mpic_alloc(np, r.start,
+ if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
+ mpic = mpic_alloc(np, r.start,
+ MPIC_PRIMARY |
+ MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS,
+ 0, 256, " OpenPIC ");
+ } else {
+ mpic = mpic_alloc(np, r.start,
MPIC_PRIMARY | MPIC_WANTS_RESET |
MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
+ }
+
BUG_ON(mpic == NULL);
of_node_put(np);
@@ -139,6 +148,9 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
/*
* Setup the architecture
*/
+#ifdef CONFIG_SMP
+extern void __init mpc85xx_smp_init(void);
+#endif
static void __init mpc85xx_ds_setup_arch(void)
{
#ifdef CONFIG_PCI
@@ -164,6 +176,10 @@ static void __init mpc85xx_ds_setup_arch(void)
ppc_md.pci_exclude_device = mpc85xx_exclude_device;
#endif
+#ifdef CONFIG_SMP
+ mpc85xx_smp_init();
+#endif
+
printk("MPC85xx DS board from Freescale Semiconductor\n");
}
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 2494c5155919..658a36fab3ab 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -231,7 +231,7 @@ static void __init mpc85xx_mds_setup_arch(void)
static int __init board_fixups(void)
{
- char phy_id[BUS_ID_SIZE];
+ char phy_id[20];
char *compstrs[2] = {"fsl,gianfar-mdio", "fsl,ucc-mdio"};
struct device_node *mdio;
struct resource res;
@@ -241,13 +241,15 @@ static int __init board_fixups(void)
mdio = of_find_compatible_node(NULL, NULL, compstrs[i]);
of_address_to_resource(mdio, 0, &res);
- snprintf(phy_id, BUS_ID_SIZE, "%x:%02x", res.start, 1);
+ snprintf(phy_id, sizeof(phy_id), "%llx:%02x",
+ (unsigned long long)res.start, 1);
phy_register_fixup_for_id(phy_id, mpc8568_fixup_125_clock);
phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups);
/* Register a workaround for errata */
- snprintf(phy_id, BUS_ID_SIZE, "%x:%02x", res.start, 7);
+ snprintf(phy_id, sizeof(phy_id), "%llx:%02x",
+ (unsigned long long)res.start, 7);
phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups);
of_node_put(mdio);
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
new file mode 100644
index 000000000000..79a0df17078b
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -0,0 +1,105 @@
+/*
+ * Author: Andy Fleming <afleming@freescale.com>
+ * Kumar Gala <galak@kernel.crashing.org>
+ *
+ * Copyright 2006-2008 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+
+#include <asm/machdep.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/mpic.h>
+#include <asm/cacheflush.h>
+
+#include <sysdev/fsl_soc.h>
+
+extern volatile unsigned long __secondary_hold_acknowledge;
+extern void __early_start(void);
+
+#define BOOT_ENTRY_ADDR_UPPER 0
+#define BOOT_ENTRY_ADDR_LOWER 1
+#define BOOT_ENTRY_R3_UPPER 2
+#define BOOT_ENTRY_R3_LOWER 3
+#define BOOT_ENTRY_RESV 4
+#define BOOT_ENTRY_PIR 5
+#define BOOT_ENTRY_R6_UPPER 6
+#define BOOT_ENTRY_R6_LOWER 7
+#define NUM_BOOT_ENTRY 8
+#define SIZE_BOOT_ENTRY (NUM_BOOT_ENTRY * sizeof(u32))
+
+static void __init
+smp_85xx_kick_cpu(int nr)
+{
+ unsigned long flags;
+ const u64 *cpu_rel_addr;
+ __iomem u32 *bptr_vaddr;
+ struct device_node *np;
+ int n = 0;
+
+ WARN_ON (nr < 0 || nr >= NR_CPUS);
+
+ pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);
+
+ local_irq_save(flags);
+
+ np = of_get_cpu_node(nr, NULL);
+ cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
+
+ if (cpu_rel_addr == NULL) {
+ printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
+ local_irq_restore(flags);
+ return;
+ }
+
+ /* Map the spin table */
+ bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+
+ out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
+ out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
+
+ /* Wait a bit for the CPU to ack. */
+ while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
+ mdelay(1);
+
+ iounmap(bptr_vaddr);
+
+ local_irq_restore(flags);
+
+ pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
+}
+
+static void __init
+smp_85xx_setup_cpu(int cpu_nr)
+{
+ mpic_setup_this_cpu();
+
+ /* Clear any pending timer interrupts */
+ mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
+
+ /* Enable decrementer interrupt */
+ mtspr(SPRN_TCR, TCR_DIE);
+}
+
+struct smp_ops_t smp_85xx_ops = {
+ .message_pass = smp_mpic_message_pass,
+ .probe = smp_mpic_probe,
+ .kick_cpu = smp_85xx_kick_cpu,
+ .setup_cpu = smp_85xx_setup_cpu,
+};
+
+void __init
+mpc85xx_smp_init(void)
+{
+ smp_ops = &smp_85xx_ops;
+}
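/*
 * For reference, the BOOT_ENTRY_* indices used by smp_85xx_kick_cpu()
 * describe the spin table that the boot firmware exposes through the
 * "cpu-release-addr" property, one 32-bit big-endian word per field.
 * Written out as a struct (a sketch for illustration, not part of the
 * patch), each entry looks like:
 */
struct mpc85xx_spin_table {
	u32 entry_addr_upper;	/* BOOT_ENTRY_ADDR_UPPER */
	u32 entry_addr_lower;	/* BOOT_ENTRY_ADDR_LOWER: writing __pa(__early_start) releases the CPU */
	u32 r3_upper;		/* BOOT_ENTRY_R3_UPPER */
	u32 r3_lower;		/* BOOT_ENTRY_R3_LOWER */
	u32 reserved;		/* BOOT_ENTRY_RESV */
	u32 pir;		/* BOOT_ENTRY_PIR: logical CPU number, written before the entry address */
	u32 r6_upper;		/* BOOT_ENTRY_R6_UPPER */
	u32 r6_lower;		/* BOOT_ENTRY_R6_LOWER */
};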
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 77dd797a2580..8e5693935975 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -34,6 +34,8 @@ config MPC8610_HPCD
config GEF_SBC610
bool "GE Fanuc SBC610"
select DEFAULT_UIMAGE
+ select GENERIC_GPIO
+ select ARCH_REQUIRE_GPIOLIB
select HAS_RAPIDIO
help
This option enables support for GE Fanuc's SBC610.
diff --git a/arch/powerpc/platforms/86xx/Makefile b/arch/powerpc/platforms/86xx/Makefile
index 4a56ff619afd..31e540c2ebbc 100644
--- a/arch/powerpc/platforms/86xx/Makefile
+++ b/arch/powerpc/platforms/86xx/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_SMP) += mpc86xx_smp.o
obj-$(CONFIG_MPC8641_HPCN) += mpc86xx_hpcn.o
obj-$(CONFIG_SBC8641D) += sbc8641d.o
obj-$(CONFIG_MPC8610_HPCD) += mpc8610_hpcd.o
-obj-$(CONFIG_GEF_SBC610) += gef_sbc610.o gef_pic.o
+gef-gpio-$(CONFIG_GPIOLIB) += gef_gpio.o
+obj-$(CONFIG_GEF_SBC610) += gef_sbc610.o gef_pic.o $(gef-gpio-y)
diff --git a/arch/powerpc/platforms/86xx/gef_gpio.c b/arch/powerpc/platforms/86xx/gef_gpio.c
new file mode 100644
index 000000000000..85b2800f4cb7
--- /dev/null
+++ b/arch/powerpc/platforms/86xx/gef_gpio.c
@@ -0,0 +1,143 @@
+/*
+ * Driver for GE Fanuc's FPGA based GPIO pins
+ *
+ * Author: Martyn Welch <martyn.welch@gefanuc.com>
+ *
+ * 2008 (c) GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/* TODO
+ *
+ * Configuration of output modes (totem-pole/open-drain)
+ * Interrupt configuration - interrupts are always generated; the FPGA relies on
+ * the I/O interrupt controller's mask to stop them propagating
+ */
+
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+
+#define GEF_GPIO_DIRECT 0x00
+#define GEF_GPIO_IN 0x04
+#define GEF_GPIO_OUT 0x08
+#define GEF_GPIO_TRIG 0x0C
+#define GEF_GPIO_POLAR_A 0x10
+#define GEF_GPIO_POLAR_B 0x14
+#define GEF_GPIO_INT_STAT 0x18
+#define GEF_GPIO_OVERRUN 0x1C
+#define GEF_GPIO_MODE 0x20
+
+#define NUM_GPIO 19
+
+static void _gef_gpio_set(void __iomem *reg, unsigned int offset, int value)
+{
+ unsigned int data;
+
+ data = ioread32be(reg);
+ /* value: 0=low; 1=high */
+ if (value & 0x1)
+ data = data | (0x1 << offset);
+ else
+ data = data & ~(0x1 << offset);
+
+ iowrite32be(data, reg);
+}
+
+
+static int gef_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned int data;
+ struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
+
+ data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
+ data = data | (0x1 << offset);
+ iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
+
+ return 0;
+}
+
+static int gef_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
+{
+ unsigned int data;
+ struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
+
+ /* Set the output value before switching the direction to output */
+ _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
+
+ data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
+ data = data & ~(0x1 << offset);
+ iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
+
+ return 0;
+}
+
+static int gef_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned int data;
+ int state = 0;
+ struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
+
+ data = ioread32be(mmchip->regs + GEF_GPIO_IN);
+ state = (int)((data >> offset) & 0x1);
+
+ return state;
+}
+
+static void gef_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
+
+ _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
+}
+
+static int __init gef_gpio_init(void)
+{
+ struct device_node *np;
+
+ for_each_compatible_node(np, NULL, "gef,sbc610-gpio") {
+ int retval;
+ struct of_mm_gpio_chip *gef_gpio_chip;
+
+ pr_debug("%s: Initialising GEF GPIO\n", np->full_name);
+
+ /* Allocate chip structure */
+ gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL);
+ if (!gef_gpio_chip) {
+ pr_err("%s: Unable to allocate structure\n",
+ np->full_name);
+ continue;
+ }
+
+ /* Setup pointers to chip functions */
+ gef_gpio_chip->of_gc.gpio_cells = 2;
+ gef_gpio_chip->of_gc.gc.ngpio = NUM_GPIO;
+ gef_gpio_chip->of_gc.gc.direction_input = gef_gpio_dir_in;
+ gef_gpio_chip->of_gc.gc.direction_output = gef_gpio_dir_out;
+ gef_gpio_chip->of_gc.gc.get = gef_gpio_get;
+ gef_gpio_chip->of_gc.gc.set = gef_gpio_set;
+
+ /* This function adds a memory mapped GPIO chip */
+ retval = of_mm_gpiochip_add(np, gef_gpio_chip);
+ if (retval) {
+ kfree(gef_gpio_chip);
+ pr_err("%s: Unable to add GPIO\n", np->full_name);
+ }
+ }
+
+ return 0;
+};
+arch_initcall(gef_gpio_init);
+
+MODULE_DESCRIPTION("GE Fanuc I/O FPGA GPIO driver");
+MODULE_AUTHOR("Martyn Welch <martyn.welch@gefanuc.com>");
+MODULE_LICENSE("GPL");
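/*
 * Consumer-side sketch: once gef_gpio_init() has registered the chip
 * through of_mm_gpiochip_add(), any of the 19 FPGA pins can be driven
 * with the generic gpiolib calls.  The GPIO number below is
 * hypothetical; the real base is assigned by gpiolib at registration
 * time.
 */
#include <linux/gpio.h>

static int __init sbc610_gpio_example(void)
{
	int gpio = 224;			/* hypothetical base of the registered chip */
	int err;

	err = gpio_request(gpio, "sbc610-example");
	if (err)
		return err;

	gpio_direction_output(gpio, 1);	/* configure as output, driven high */
	gpio_set_value(gpio, 0);	/* later drive it low */
	gpio_free(gpio);

	return 0;
}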
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 47e956c871fe..47fe2bea9865 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -312,4 +312,15 @@ config MPC8xxx_GPIO
Say Y here if you're going to use hardware that connects to the
MPC831x/834x/837x/8572/8610 GPIOs.
+config SIMPLE_GPIO
+ bool "Support for simple, memory-mapped GPIO controllers"
+ depends on PPC
+ select GENERIC_GPIO
+ select ARCH_REQUIRE_GPIOLIB
+ help
+ Say Y here to support simple, memory-mapped GPIO controllers.
+ These are usually BCSRs used to control a board's switches, LEDs,
+ chip-selects, Ethernet/USB PHY power and various other small
+ on-board peripherals.
+
endmenu
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 548efa55c8fe..e868b5c50723 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -195,16 +195,24 @@ config SPE
config PPC_STD_MMU
bool
- depends on 6xx || POWER3 || POWER4 || PPC64
+ depends on 6xx || PPC64
default y
config PPC_STD_MMU_32
def_bool y
depends on PPC_STD_MMU && PPC32
+config PPC_STD_MMU_64
+ def_bool y
+ depends on PPC_STD_MMU && PPC64
+
+config PPC_MMU_NOHASH
+ def_bool y
+ depends on !PPC_STD_MMU
+
config PPC_MM_SLICES
bool
- default y if HUGETLB_PAGE || PPC_64K_PAGES
+ default y if HUGETLB_PAGE || (PPC_STD_MMU_64 && PPC_64K_PAGES)
default n
config VIRT_CPU_ACCOUNTING
@@ -223,7 +231,7 @@ config VIRT_CPU_ACCOUNTING
If in doubt, say Y here.
config SMP
- depends on PPC_STD_MMU
+ depends on PPC_STD_MMU || FSL_BOOKE
bool "Symmetric multi-processing support"
---help---
This enables support for systems with more than one CPU. If you have
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index c14d7d8d96c8..5cc3279559a4 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -2,13 +2,18 @@ config PPC_CELL
bool
default n
-config PPC_CELL_NATIVE
+config PPC_CELL_COMMON
bool
select PPC_CELL
select PPC_DCR_MMIO
- select PPC_OF_PLATFORM_PCI
select PPC_INDIRECT_IO
select PPC_NATIVE
+ select PPC_RTAS
+
+config PPC_CELL_NATIVE
+ bool
+ select PPC_CELL_COMMON
+ select PPC_OF_PLATFORM_PCI
select MPIC
select IBM_NEW_EMAC_EMAC4
select IBM_NEW_EMAC_RGMII
@@ -20,7 +25,6 @@ config PPC_IBM_CELL_BLADE
bool "IBM Cell Blade"
depends on PPC_MULTIPLATFORM && PPC64
select PPC_CELL_NATIVE
- select PPC_RTAS
select MMIO_NVRAM
select PPC_UDBG_16550
select UDBG_RTAS_CONSOLE
@@ -28,16 +32,17 @@ config PPC_IBM_CELL_BLADE
config PPC_CELLEB
bool "Toshiba's Cell Reference Set 'Celleb' Architecture"
depends on PPC_MULTIPLATFORM && PPC64
- select PPC_CELL
select PPC_CELL_NATIVE
- select PPC_RTAS
- select PPC_INDIRECT_IO
- select PPC_OF_PLATFORM_PCI
select HAS_TXX9_SERIAL
select PPC_UDBG_BEAT
select USB_OHCI_BIG_ENDIAN_MMIO
select USB_EHCI_BIG_ENDIAN_MMIO
+config PPC_CELL_QPACE
+ bool "IBM Cell - QPACE"
+ depends on PPC_MULTIPLATFORM && PPC64
+ select PPC_CELL_COMMON
+
menu "Cell Broadband Engine options"
depends on PPC_CELL
@@ -102,7 +107,7 @@ config PPC_IBM_CELL_POWERBUTTON
config CBE_THERM
tristate "CBE thermal support"
default m
- depends on CBE_RAS
+ depends on CBE_RAS && SPU_BASE
config CBE_CPUFREQ
tristate "CBE frequency scaling"
@@ -136,5 +141,5 @@ endmenu
config OPROFILE_CELL
def_bool y
- depends on PPC_CELL_NATIVE && (OPROFILE = m || OPROFILE = y)
+ depends on PPC_CELL_NATIVE && (OPROFILE = m || OPROFILE = y) && SPU_BASE
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 7fd830872c43..43eccb270301 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,7 +1,7 @@
-obj-$(CONFIG_PPC_CELL_NATIVE) += interrupt.o iommu.o setup.o \
- cbe_regs.o spider-pic.o \
- pervasive.o pmu.o io-workarounds.o \
- spider-pci.o
+obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o
+
+obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \
+ pmu.o io-workarounds.o spider-pci.o
obj-$(CONFIG_CBE_RAS) += ras.o
obj-$(CONFIG_CBE_THERM) += cbe_thermal.o
@@ -14,13 +14,12 @@ obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o
ifeq ($(CONFIG_SMP),y)
obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o
+obj-$(CONFIG_PPC_CELL_QPACE) += smp.o
endif
# needed only when building loadable spufs.ko
-spu-priv1-$(CONFIG_PPC_CELL_NATIVE) += spu_priv1_mmio.o
-
-spu-manage-$(CONFIG_PPC_CELLEB) += spu_manage.o
-spu-manage-$(CONFIG_PPC_CELL_NATIVE) += spu_manage.o
+spu-priv1-$(CONFIG_PPC_CELL_COMMON) += spu_priv1_mmio.o
+spu-manage-$(CONFIG_PPC_CELL_COMMON) += spu_manage.o
obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
spu_notify.o \
@@ -31,6 +30,8 @@ obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
obj-$(CONFIG_PCI_MSI) += axon_msi.o
+# qpace setup
+obj-$(CONFIG_PPC_CELL_QPACE) += qpace_setup.o
# celleb stuff
ifeq ($(CONFIG_PPC_CELLEB),y)
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index 2e67bd840e01..35b1ec492715 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -44,8 +44,8 @@ static DEFINE_SPINLOCK(beat_htab_lock);
static inline unsigned int beat_read_mask(unsigned hpte_group)
{
- unsigned long hpte_v[5];
unsigned long rmask = 0;
+ u64 hpte_v[5];
beat_read_htab_entries(0, hpte_group + 0, hpte_v);
if (!(hpte_v[0] & HPTE_V_BOLTED))
@@ -93,8 +93,7 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
int psize, int ssize)
{
unsigned long lpar_rc;
- unsigned long slot;
- unsigned long hpte_v, hpte_r;
+ u64 hpte_v, hpte_r, slot;
/* same as iseries */
if (vflags & HPTE_V_SECONDARY)
@@ -153,8 +152,9 @@ static long beat_lpar_hpte_remove(unsigned long hpte_group)
static unsigned long beat_lpar_hpte_getword0(unsigned long slot)
{
- unsigned long dword0, dword[5];
+ unsigned long dword0;
unsigned long lpar_rc;
+ u64 dword[5];
lpar_rc = beat_read_htab_entries(0, slot & ~3UL, dword);
@@ -170,7 +170,7 @@ static void beat_lpar_hptab_clear(void)
unsigned long size_bytes = 1UL << ppc64_pft_size;
unsigned long hpte_count = size_bytes >> 4;
int i;
- unsigned long dummy0, dummy1;
+ u64 dummy0, dummy1;
/* TODO: Use bulk call */
for (i = 0; i < hpte_count; i++)
@@ -189,7 +189,8 @@ static long beat_lpar_hpte_updatepp(unsigned long slot,
int psize, int ssize, int local)
{
unsigned long lpar_rc;
- unsigned long dummy0, dummy1, want_v;
+ u64 dummy0, dummy1;
+ unsigned long want_v;
want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M);
@@ -255,7 +256,8 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
unsigned long ea,
int psize, int ssize)
{
- unsigned long lpar_rc, slot, vsid, va, dummy0, dummy1;
+ unsigned long lpar_rc, slot, vsid, va;
+ u64 dummy0, dummy1;
vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
va = (vsid << 28) | (ea & 0x0fffffff);
@@ -276,7 +278,7 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
{
unsigned long want_v;
unsigned long lpar_rc;
- unsigned long dummy1, dummy2;
+ u64 dummy1, dummy2;
unsigned long flags;
DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
@@ -315,8 +317,7 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
int psize, int ssize)
{
unsigned long lpar_rc;
- unsigned long slot;
- unsigned long hpte_v, hpte_r;
+ u64 hpte_v, hpte_r, slot;
/* same as iseries */
if (vflags & HPTE_V_SECONDARY)
diff --git a/arch/powerpc/platforms/cell/beat_udbg.c b/arch/powerpc/platforms/cell/beat_udbg.c
index 6b418f6b6175..350735bc8888 100644
--- a/arch/powerpc/platforms/cell/beat_udbg.c
+++ b/arch/powerpc/platforms/cell/beat_udbg.c
@@ -40,8 +40,8 @@ static void udbg_putc_beat(char c)
}
/* Buffered chars getc */
-static long inbuflen;
-static long inbuf[2]; /* must be 2 longs */
+static u64 inbuflen;
+static u64 inbuf[2]; /* must be 2 u64s */
static int udbg_getc_poll_beat(void)
{
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
index 70fa7aef5edd..20472e487b6f 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
@@ -54,7 +54,7 @@ int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
{
struct cbe_pmd_regs __iomem *pmd_regs;
struct cbe_mic_tm_regs __iomem *mic_tm_regs;
- u64 flags;
+ unsigned long flags;
u64 value;
#ifdef DEBUG
long time;
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c
index b11cb30decb2..07c234f6b2b6 100644
--- a/arch/powerpc/platforms/cell/celleb_setup.c
+++ b/arch/powerpc/platforms/cell/celleb_setup.c
@@ -45,7 +45,6 @@
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/kexec.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
@@ -226,9 +225,6 @@ define_machine(celleb_beat) {
.pci_setup_phb = celleb_setup_phb,
#ifdef CONFIG_KEXEC
.kexec_cpu_down = beat_kexec_cpu_down,
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
#endif
};
@@ -248,9 +244,4 @@ define_machine(celleb_native) {
.pci_probe_mode = celleb_pci_probe_mode,
.pci_setup_phb = celleb_setup_phb,
.init_IRQ = celleb_init_IRQ_native,
-#ifdef CONFIG_KEXEC
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
-#endif
};
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 2d5bb22d6c09..28c04dab2633 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -148,7 +148,7 @@ static unsigned int iic_get_irq(void)
iic = &__get_cpu_var(iic);
*(unsigned long *) &pending =
- in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
+ in_be64((u64 __iomem *) &iic->regs->pending_destr);
if (!(pending.flags & CBE_IIC_IRQ_VALID))
return NO_IRQ;
virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c
index b5f84e8f0899..059cad6c3f69 100644
--- a/arch/powerpc/platforms/cell/io-workarounds.c
+++ b/arch/powerpc/platforms/cell/io-workarounds.c
@@ -130,14 +130,14 @@ static const struct ppc_pci_io __devinitconst iowa_pci_io = {
};
-static void __iomem *iowa_ioremap(unsigned long addr, unsigned long size,
+static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
unsigned long flags)
{
struct iowa_bus *bus;
void __iomem *res = __ioremap(addr, size, flags);
int busno;
- bus = iowa_pci_find(0, addr);
+ bus = iowa_pci_find(0, (unsigned long)addr);
if (bus != NULL) {
busno = bus - iowa_busses;
PCI_SET_ADDR_TOKEN(res, busno + 1);
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 3168272ab0d7..88d94b59a7cb 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -150,8 +150,8 @@ static int cbe_nr_iommus;
static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
long n_ptes)
{
- unsigned long __iomem *reg;
- unsigned long val;
+ u64 __iomem *reg;
+ u64 val;
long n;
reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;
@@ -1053,10 +1053,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
}
/* We must have dma-ranges properties for fixed mapping to work */
- for (np = NULL; (np = of_find_all_nodes(np));) {
- if (of_find_property(np, "dma-ranges", NULL))
- break;
- }
+ np = of_find_node_with_property(NULL, "dma-ranges");
of_node_put(np);
if (!np) {
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c
new file mode 100644
index 000000000000..be84e6a16b30
--- /dev/null
+++ b/arch/powerpc/platforms/cell/qpace_setup.c
@@ -0,0 +1,152 @@
+/*
+ * linux/arch/powerpc/platforms/cell/qpace_setup.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Adapted from 'alpha' version by Gary Thomas
+ * Modified by Cort Dougan (cort@cs.nmt.edu)
+ * Modified by PPC64 Team, IBM Corp
+ * Modified by Cell Team, IBM Deutschland Entwicklung GmbH
+ * Modified by Benjamin Krill <ben@codiert.org>, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/console.h>
+#include <linux/of_platform.h>
+
+#include <asm/mmu.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/kexec.h>
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/rtas.h>
+#include <asm/dma.h>
+#include <asm/machdep.h>
+#include <asm/time.h>
+#include <asm/cputable.h>
+#include <asm/irq.h>
+#include <asm/spu.h>
+#include <asm/spu_priv1.h>
+#include <asm/udbg.h>
+#include <asm/cell-regs.h>
+
+#include "interrupt.h"
+#include "pervasive.h"
+#include "ras.h"
+#include "io-workarounds.h"
+
+static void qpace_show_cpuinfo(struct seq_file *m)
+{
+ struct device_node *root;
+ const char *model = "";
+
+ root = of_find_node_by_path("/");
+ if (root)
+ model = of_get_property(root, "model", NULL);
+ seq_printf(m, "machine\t\t: CHRP %s\n", model);
+ of_node_put(root);
+}
+
+static void qpace_progress(char *s, unsigned short hex)
+{
+ printk("*** %04x : %s\n", hex, s ? s : "");
+}
+
+static int __init qpace_publish_devices(void)
+{
+ int node;
+
+ /* Publish OF platform devices for southbridge IOs */
+ of_platform_bus_probe(NULL, NULL, NULL);
+
+ /* There is no device for the MIC memory controller, thus we create
+ * a platform device for it to attach the EDAC driver to.
+ */
+ for_each_online_node(node) {
+ if (cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(node)) == NULL)
+ continue;
+ platform_device_register_simple("cbe-mic", node, NULL, 0);
+ }
+
+ return 0;
+}
+machine_subsys_initcall(qpace, qpace_publish_devices);
+
+extern int qpace_notify(struct device *dev)
+{
+ /* set dma_ops for of_platform bus */
+ if (dev->bus && dev->bus->name
+ && !strcmp(dev->bus->name, "of_platform"))
+ set_dma_ops(dev, &dma_direct_ops);
+
+ return 0;
+}
+
+static void __init qpace_setup_arch(void)
+{
+#ifdef CONFIG_SPU_BASE
+ spu_priv1_ops = &spu_priv1_mmio_ops;
+ spu_management_ops = &spu_management_of_ops;
+#endif
+
+ cbe_regs_init();
+
+#ifdef CONFIG_CBE_RAS
+ cbe_ras_init();
+#endif
+
+#ifdef CONFIG_SMP
+ smp_init_cell();
+#endif
+
+ /* init to some ~sane value until calibrate_delay() runs */
+ loops_per_jiffy = 50000000;
+
+ cbe_pervasive_init();
+#ifdef CONFIG_DUMMY_CONSOLE
+ conswitchp = &dummy_con;
+#endif
+
+ /* set notifier function */
+ platform_notify = &qpace_notify;
+}
+
+static int __init qpace_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (!of_flat_dt_is_compatible(root, "IBM,QPACE"))
+ return 0;
+
+ hpte_init_native();
+
+ return 1;
+}
+
+define_machine(qpace) {
+ .name = "QPACE",
+ .probe = qpace_probe,
+ .setup_arch = qpace_setup_arch,
+ .show_cpuinfo = qpace_show_cpuinfo,
+ .restart = rtas_restart,
+ .power_off = rtas_power_off,
+ .halt = rtas_halt,
+ .get_boot_time = rtas_get_boot_time,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = qpace_progress,
+ .init_IRQ = iic_init_IRQ,
+#ifdef CONFIG_KEXEC
+ .machine_kexec = default_machine_kexec,
+ .machine_kexec_prepare = default_machine_kexec_prepare,
+ .machine_crash_shutdown = default_machine_crash_shutdown,
+#endif
+};
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index ab721b50fbba..59305369f6b2 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -35,7 +35,6 @@
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/kexec.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -289,9 +288,4 @@ define_machine(cell) {
.progress = cell_progress,
.init_IRQ = cell_init_irq,
.pci_setup_phb = cell_setup_phb,
-#ifdef CONFIG_KEXEC
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
-#endif
};
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index 906a0a2a9fe1..1410443731eb 100644
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -80,10 +80,10 @@ static void cpu_affinity_set(struct spu *spu, int cpu)
u64 route;
if (nr_cpus_node(spu->node)) {
- cpumask_t spumask = node_to_cpumask(spu->node);
- cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu));
+ const struct cpumask *spumask = cpumask_of_node(spu->node),
+ *cpumask = cpumask_of_node(cpu_to_node(cpu));
- if (!cpus_intersects(spumask, cpumask))
+ if (!cpumask_intersects(spumask, cpumask))
return;
}
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 1b26071a86ca..7106b63d401b 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -273,12 +273,10 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
if (ctx->state == SPU_STATE_SAVED) {
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- & ~_PAGE_NO_CACHE);
+ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
} else {
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE);
+ vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
}
vm_insert_pfn(vma, address, pfn);
@@ -338,8 +336,7 @@ static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE);
+ vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
vma->vm_ops = &spufs_mem_mmap_vmops;
return 0;
@@ -452,8 +449,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_cntl_mmap_vmops;
return 0;
@@ -1155,8 +1151,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_signal1_mmap_vmops;
return 0;
@@ -1292,8 +1287,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_signal2_mmap_vmops;
return 0;
@@ -1414,8 +1408,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_mss_mmap_vmops;
return 0;
@@ -1476,8 +1469,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_psmap_mmap_vmops;
return 0;
@@ -1536,8 +1528,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_mfc_mmap_vmops;
return 0;
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index cb85d237e492..e309ef70a531 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -95,9 +95,8 @@ spufs_new_inode(struct super_block *sb, int mode)
goto out;
inode->i_mode = mode;
- inode->i_uid = current->fsuid;
- inode->i_gid = current->fsgid;
- inode->i_blocks = 0;
+ inode->i_uid = current_fsuid();
+ inode->i_gid = current_fsgid();
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
out:
return inode;
@@ -323,7 +322,7 @@ static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt)
goto out;
}
- filp = dentry_open(dentry, mnt, O_RDONLY);
+ filp = dentry_open(dentry, mnt, O_RDONLY, current_cred());
if (IS_ERR(filp)) {
put_unused_fd(ret);
ret = PTR_ERR(filp);
@@ -562,7 +561,7 @@ static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
goto out;
}
- filp = dentry_open(dentry, mnt, O_RDONLY);
+ filp = dentry_open(dentry, mnt, O_RDONLY, current_cred());
if (IS_ERR(filp)) {
put_unused_fd(ret);
ret = PTR_ERR(filp);
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 2ad914c47493..6a0ad196aeb3 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -166,9 +166,9 @@ void spu_update_sched_info(struct spu_context *ctx)
static int __node_allowed(struct spu_context *ctx, int node)
{
if (nr_cpus_node(node)) {
- cpumask_t mask = node_to_cpumask(node);
+ const struct cpumask *mask = cpumask_of_node(node);
- if (cpus_intersects(mask, ctx->cpus_allowed))
+ if (cpumask_intersects(mask, &ctx->cpus_allowed))
return 1;
}
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 15c62d3ca129..3bf908e2873a 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -314,7 +314,7 @@ extern char *isolated_loader;
* we need to call spu_release(ctx) before sleeping, and
* then spu_acquire(ctx) when awoken.
*
- * Returns with state_mutex re-acquired when successfull or
+ * Returns with state_mutex re-acquired when successful or
* with -ERESTARTSYS and the state_mutex dropped when interrupted.
*/
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index d3cde6b9d2df..f6b0c519d5a2 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -141,6 +141,7 @@ hydra_init(void)
of_node_put(np);
return 0;
}
+ of_node_put(np);
Hydra = ioremap(r.start, r.end-r.start);
printk("Hydra Mac I/O at %llx\n", (unsigned long long)r.start);
printk("Hydra Feature_Control was %x",
@@ -198,7 +199,7 @@ static void __init setup_peg2(struct pci_controller *hose, struct device_node *d
printk ("RTAS supporting Pegasos OF not found, please upgrade"
" your firmware\n");
}
- ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
/* keep the reference to the root node */
}
diff --git a/arch/powerpc/platforms/embedded6xx/c2k.c b/arch/powerpc/platforms/embedded6xx/c2k.c
index 32ba0fa0ad03..8cab5731850f 100644
--- a/arch/powerpc/platforms/embedded6xx/c2k.c
+++ b/arch/powerpc/platforms/embedded6xx/c2k.c
@@ -20,7 +20,6 @@
#include <linux/seq_file.h>
#include <linux/time.h>
#include <linux/of.h>
-#include <linux/kexec.h>
#include <asm/machdep.h>
#include <asm/prom.h>
@@ -147,9 +146,4 @@ define_machine(c2k) {
.get_irq = mv64x60_get_irq,
.restart = c2k_restart,
.calibrate_decr = generic_calibrate_decr,
-#ifdef CONFIG_KEXEC
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
-#endif
};
diff --git a/arch/powerpc/platforms/embedded6xx/prpmc2800.c b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
index 4c485e984236..670035f49a69 100644
--- a/arch/powerpc/platforms/embedded6xx/prpmc2800.c
+++ b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
@@ -19,7 +19,6 @@
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/time.h>
-#include <asm/kexec.h>
#include <mm/mmu_decl.h>
@@ -155,9 +154,4 @@ define_machine(prpmc2800){
.get_irq = mv64x60_get_irq,
.restart = prpmc2800_restart,
.calibrate_decr = generic_calibrate_decr,
-#ifdef CONFIG_KEXEC
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
-#endif
};
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
index 45ffd8e542f4..7ddd0a2c8027 100644
--- a/arch/powerpc/platforms/iseries/Kconfig
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -9,18 +9,22 @@ menu "iSeries device drivers"
config VIODASD
tristate "iSeries Virtual I/O disk support"
+ depends on BLOCK
+ select VIOPATH
help
If you are running on an iSeries system and you want to use
virtual disks created and managed by OS/400, say Y.
config VIOCD
tristate "iSeries Virtual I/O CD support"
+ select VIOPATH
help
If you are running Linux on an IBM iSeries system and you want to
read a CD drive owned by OS/400, say Y here.
config VIOTAPE
tristate "iSeries Virtual Tape Support"
+ select VIOPATH
help
If you are running Linux on an iSeries system and you want Linux
to read and/or write a tape drive owned by OS/400, say Y here.
@@ -29,5 +33,3 @@ endmenu
config VIOPATH
bool
- depends on VIODASD || VIOCD || VIOTAPE || ISERIES_VETH
- default y
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 70b688c1aefb..24519b96d6ad 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -23,6 +23,7 @@
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
+#include <linux/kexec.h>
#include <linux/major.h>
#include <linux/root_dev.h>
#include <linux/kernel.h>
@@ -638,6 +639,13 @@ static int __init iseries_probe(void)
return 1;
}
+#ifdef CONFIG_KEXEC
+static int iseries_kexec_prepare(struct kimage *image)
+{
+ return -ENOSYS;
+}
+#endif
+
define_machine(iseries) {
.name = "iSeries",
.setup_arch = iSeries_setup_arch,
@@ -658,6 +666,9 @@ define_machine(iseries) {
.probe = iseries_probe,
.ioremap = iseries_ioremap,
.iounmap = iseries_iounmap,
+#ifdef CONFIG_KEXEC
+ .machine_kexec_prepare = iseries_kexec_prepare,
+#endif
/* XXX Implement enable_pmcs for iSeries */
};
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index d4c61c3c9669..bfd60e4accee 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -50,7 +50,6 @@
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/io.h>
-#include <asm/kexec.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
@@ -335,9 +334,4 @@ define_machine(maple) {
.calibrate_decr = generic_calibrate_decr,
.progress = maple_progress,
.power_save = power4_idle,
-#ifdef CONFIG_KEXEC
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
-#endif
};
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
index 58556b028a4c..86db47c1b665 100644
--- a/arch/powerpc/platforms/pasemi/cpufreq.c
+++ b/arch/powerpc/platforms/pasemi/cpufreq.c
@@ -112,7 +112,7 @@ static int get_gizmo_latency(void)
static void set_astate(int cpu, unsigned int astate)
{
- u64 flags;
+ unsigned long flags;
/* Return if called before init has run */
if (unlikely(!sdcasr_mapbase))
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index 217af321b0ca..a6152d922243 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -509,7 +509,7 @@ fallback:
*/
int pasemi_dma_init(void)
{
- static spinlock_t init_lock = SPIN_LOCK_UNLOCKED;
+ static DEFINE_SPINLOCK(init_lock);
struct pci_dev *iob_pdev;
struct pci_dev *pdev;
struct resource res;
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index 792d3ce8112e..65c585b8b00d 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -310,7 +310,7 @@ static int pmu_set_cpu_speed(int low_speed)
_set_L3CR(save_l3cr);
/* Restore userland MMU context */
- set_context(current->active_mm->context.id, current->active_mm->pgd);
+ switch_mmu_context(NULL, current->active_mm);
#ifdef DEBUG_FREQ
printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index bcf50d7056e9..04cdd32624d4 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -661,6 +661,7 @@ static void __init init_second_ohare(void)
pci_find_hose_for_OF_device(np);
if (!hose) {
printk(KERN_ERR "Can't find PCI hose for OHare2 !\n");
+ of_node_put(np);
return;
}
early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd);
@@ -669,6 +670,7 @@ static void __init init_second_ohare(void)
early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd);
}
has_second_ohare = 1;
+ of_node_put(np);
}
/*
@@ -729,7 +731,7 @@ static void __init setup_bandit(struct pci_controller *hose,
static int __init setup_uninorth(struct pci_controller *hose,
struct resource *addr)
{
- ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
has_uninorth = 1;
hose->ops = &macrisc_pci_ops;
hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
@@ -996,7 +998,7 @@ void __init pmac_pci_init(void)
struct device_node *np, *root;
struct device_node *ht = NULL;
- ppc_pci_flags = PPC_PCI_CAN_SKIP_ISA_ALIGN;
+ ppc_pci_set_flags(PPC_PCI_CAN_SKIP_ISA_ALIGN);
root = of_find_node_by_path("/");
if (root == NULL) {
@@ -1055,7 +1057,7 @@ void __init pmac_pci_init(void)
* some offset between bus number and domains for now when we
* assign all busses should help for now
*/
- if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_BUS)
+ if (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS))
pcibios_assign_bus_offset = 0x10;
#endif
}
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 82c14d203d8b..9b78f5300c24 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -60,7 +60,6 @@
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/io.h>
-#include <asm/kexec.h>
#include <asm/pci-bridge.h>
#include <asm/ohare.h>
#include <asm/mediabay.h>
@@ -310,9 +309,7 @@ static void __init pmac_setup_arch(void)
}
/* See if newworld or oldworld */
- for (ic = NULL; (ic = of_find_all_nodes(ic)) != NULL; )
- if (of_get_property(ic, "interrupt-controller", NULL))
- break;
+ ic = of_find_node_with_property(NULL, "interrupt-controller");
if (ic) {
pmac_newworld = 1;
of_node_put(ic);
@@ -740,11 +737,6 @@ define_machine(powermac) {
.pci_probe_mode = pmac_pci_probe_mode,
.power_save = power4_idle,
.enable_pmcs = power4_enable_pmcs,
-#ifdef CONFIG_KEXEC
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
-#endif
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC32
.pcibios_enable_device_hook = pmac_pci_enable_device_hook,
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
index adee28da353f..1c2802fabd57 100644
--- a/arch/powerpc/platforms/powermac/sleep.S
+++ b/arch/powerpc/platforms/powermac/sleep.S
@@ -17,6 +17,7 @@
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
+#include <asm/mmu.h>
#define MAGIC 0x4c617273 /* 'Lars' */
@@ -323,7 +324,7 @@ grackle_wake_up:
lwz r4,SL_IBAT3+4(r1)
mtibatl 3,r4
-BEGIN_FTR_SECTION
+BEGIN_MMU_FTR_SECTION
li r4,0
mtspr SPRN_DBAT4U,r4
mtspr SPRN_DBAT4L,r4
@@ -341,7 +342,7 @@ BEGIN_FTR_SECTION
mtspr SPRN_IBAT6L,r4
mtspr SPRN_IBAT7U,r4
mtspr SPRN_IBAT7L,r4
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
/* Flush all TLBs */
lis r4,0x1000
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 40f72c2a4699..6b0711c15eca 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -739,7 +739,7 @@ static void __init smp_core99_setup(int ncpus)
/* XXX should get this from reg properties */
for (i = 1; i < ncpus; ++i)
- smp_hw_index[i] = i;
+ set_hard_smp_processor_id(i, i);
}
#endif
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index 59eb840d8ce2..1810e4226e56 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -265,12 +265,15 @@ int __init via_calibrate_decr(void)
struct resource rsrc;
vias = of_find_node_by_name(NULL, "via-cuda");
- if (vias == 0)
+ if (vias == NULL)
vias = of_find_node_by_name(NULL, "via-pmu");
- if (vias == 0)
+ if (vias == NULL)
vias = of_find_node_by_name(NULL, "via");
- if (vias == 0 || of_address_to_resource(vias, 0, &rsrc))
+ if (vias == NULL || of_address_to_resource(vias, 0, &rsrc)) {
+ of_node_put(vias);
return 0;
+ }
+ of_node_put(vias);
via = ioremap(rsrc.start, rsrc.end - rsrc.start + 1);
if (via == NULL) {
printk(KERN_ERR "Failed to map VIA for timer calibration !\n");
@@ -297,7 +300,7 @@ int __init via_calibrate_decr(void)
ppc_tb_freq = (dstart - dend) * 100 / 6;
iounmap(via);
-
+
return 1;
}
#endif
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index ffdd8e963fbd..ca71a12b764c 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -314,11 +314,17 @@ static int __init ps3_setup_vuart_device(enum ps3_match_id match_id,
result = ps3_system_bus_device_register(&p->dev);
- if (result)
+ if (result) {
pr_debug("%s:%d ps3_system_bus_device_register failed\n",
__func__, __LINE__);
-
+ goto fail_device_register;
+ }
pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ return 0;
+
+fail_device_register:
+ kfree(p);
+ pr_debug(" <- %s:%d fail\n", __func__, __LINE__);
return result;
}
@@ -463,11 +469,17 @@ static int __init ps3_register_sound_devices(void)
result = ps3_system_bus_device_register(&p->dev);
- if (result)
+ if (result) {
pr_debug("%s:%d ps3_system_bus_device_register failed\n",
__func__, __LINE__);
-
+ goto fail_device_register;
+ }
pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ return 0;
+
+fail_device_register:
+ kfree(p);
+ pr_debug(" <- %s:%d failed\n", __func__, __LINE__);
return result;
}
@@ -485,17 +497,59 @@ static int __init ps3_register_graphics_devices(void)
if (!p)
return -ENOMEM;
- p->dev.match_id = PS3_MATCH_ID_GRAPHICS;
- p->dev.match_sub_id = PS3_MATCH_SUB_ID_FB;
+ p->dev.match_id = PS3_MATCH_ID_GPU;
+ p->dev.match_sub_id = PS3_MATCH_SUB_ID_GPU_FB;
p->dev.dev_type = PS3_DEVICE_TYPE_IOC0;
result = ps3_system_bus_device_register(&p->dev);
- if (result)
+ if (result) {
+ pr_debug("%s:%d ps3_system_bus_device_register failed\n",
+ __func__, __LINE__);
+ goto fail_device_register;
+ }
+
+ pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ return 0;
+
+fail_device_register:
+ kfree(p);
+ pr_debug(" <- %s:%d failed\n", __func__, __LINE__);
+ return result;
+}
+
+static int __init ps3_register_ramdisk_device(void)
+{
+ int result;
+ struct layout {
+ struct ps3_system_bus_device dev;
+ } *p;
+
+ pr_debug(" -> %s:%d\n", __func__, __LINE__);
+
+ p = kzalloc(sizeof(struct layout), GFP_KERNEL);
+
+ if (!p)
+ return -ENOMEM;
+
+ p->dev.match_id = PS3_MATCH_ID_GPU;
+ p->dev.match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK;
+ p->dev.dev_type = PS3_DEVICE_TYPE_IOC0;
+
+ result = ps3_system_bus_device_register(&p->dev);
+
+ if (result) {
pr_debug("%s:%d ps3_system_bus_device_register failed\n",
__func__, __LINE__);
+ goto fail_device_register;
+ }
pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ return 0;
+
+fail_device_register:
+ kfree(p);
+ pr_debug(" <- %s:%d failed\n", __func__, __LINE__);
return result;
}
@@ -927,6 +981,8 @@ static int __init ps3_register_devices(void)
ps3_register_lpm_devices();
+ ps3_register_ramdisk_device();
+
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return 0;
}
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 3a58ffabccd9..a4d49dd9e8a9 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -649,7 +649,7 @@ static int dma_sb_region_create(struct ps3_dma_region *r)
{
int result;
- pr_info(" -> %s:%d:\n", __func__, __LINE__);
+ DBG(" -> %s:%d:\n", __func__, __LINE__);
BUG_ON(!r);
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 77bc330263c4..35f3e85cf60e 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -23,7 +23,6 @@
#include <linux/fs.h>
#include <linux/root_dev.h>
#include <linux/console.h>
-#include <linux/kexec.h>
#include <linux/bootmem.h>
#include <asm/machdep.h>
@@ -42,6 +41,10 @@
#define DBG pr_debug
#endif
+/* mutex synchronizing GPU accesses and video mode changes */
+DEFINE_MUTEX(ps3_gpu_mutex);
+EXPORT_SYMBOL_GPL(ps3_gpu_mutex);
+
#if !defined(CONFIG_SMP)
static void smp_send_stop(void) {}
#endif
@@ -277,8 +280,5 @@ define_machine(ps3) {
.halt = ps3_halt,
#if defined(CONFIG_KEXEC)
.kexec_cpu_down = ps3_kexec_cpu_down,
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
#endif
};
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 661e9f77ebf6..ee0d22911621 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -31,7 +31,7 @@
#include "platform.h"
static struct device ps3_system_bus = {
- .bus_id = "ps3_system",
+ .init_name = "ps3_system",
};
/* FIXME: need device usage counters! */
@@ -175,7 +175,7 @@ int ps3_open_hv_device(struct ps3_system_bus_device *dev)
return ps3_open_hv_device_sb(dev);
case PS3_MATCH_ID_SOUND:
- case PS3_MATCH_ID_GRAPHICS:
+ case PS3_MATCH_ID_GPU:
return ps3_open_hv_device_gpu(dev);
case PS3_MATCH_ID_AV_SETTINGS:
@@ -213,7 +213,7 @@ int ps3_close_hv_device(struct ps3_system_bus_device *dev)
return ps3_close_hv_device_sb(dev);
case PS3_MATCH_ID_SOUND:
- case PS3_MATCH_ID_GRAPHICS:
+ case PS3_MATCH_ID_GPU:
return ps3_close_hv_device_gpu(dev);
case PS3_MATCH_ID_AV_SETTINGS:
@@ -356,12 +356,12 @@ static int ps3_system_bus_match(struct device *_dev,
if (result)
pr_info("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): match\n",
__func__, __LINE__,
- dev->match_id, dev->match_sub_id, dev->core.bus_id,
+ dev->match_id, dev->match_sub_id, dev_name(&dev->core),
drv->match_id, drv->match_sub_id, drv->core.name);
else
pr_debug("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): miss\n",
__func__, __LINE__,
- dev->match_id, dev->match_sub_id, dev->core.bus_id,
+ dev->match_id, dev->match_sub_id, dev_name(&dev->core),
drv->match_id, drv->match_sub_id, drv->core.name);
return result;
@@ -383,9 +383,9 @@ static int ps3_system_bus_probe(struct device *_dev)
result = drv->probe(dev);
else
pr_debug("%s:%d: %s no probe method\n", __func__, __LINE__,
- dev->core.bus_id);
+ dev_name(&dev->core));
- pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev->core.bus_id);
+ pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core));
return result;
}
@@ -407,7 +407,7 @@ static int ps3_system_bus_remove(struct device *_dev)
dev_dbg(&dev->core, "%s:%d %s: no remove method\n",
__func__, __LINE__, drv->core.name);
- pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev->core.bus_id);
+ pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core));
return result;
}
@@ -432,7 +432,7 @@ static void ps3_system_bus_shutdown(struct device *_dev)
BUG_ON(!drv);
dev_dbg(&dev->core, "%s:%d: %s -> %s\n", __func__, __LINE__,
- dev->core.bus_id, drv->core.name);
+ dev_name(&dev->core), drv->core.name);
if (drv->shutdown)
drv->shutdown(dev);
@@ -453,7 +453,8 @@ static int ps3_system_bus_uevent(struct device *_dev, struct kobj_uevent_env *en
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
- if (add_uevent_var(env, "MODALIAS=ps3:%d", dev->match_id))
+ if (add_uevent_var(env, "MODALIAS=ps3:%d:%d", dev->match_id,
+ dev->match_sub_id))
return -ENOMEM;
return 0;
}
@@ -462,7 +463,8 @@ static ssize_t modalias_show(struct device *_dev, struct device_attribute *a,
char *buf)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
- int len = snprintf(buf, PAGE_SIZE, "ps3:%d\n", dev->match_id);
+ int len = snprintf(buf, PAGE_SIZE, "ps3:%d:%d\n", dev->match_id,
+ dev->match_sub_id);
return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
@@ -742,22 +744,18 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
switch (dev->dev_type) {
case PS3_DEVICE_TYPE_IOC0:
dev->core.archdata.dma_ops = &ps3_ioc0_dma_ops;
- snprintf(dev->core.bus_id, sizeof(dev->core.bus_id),
- "ioc0_%02x", ++dev_ioc0_count);
+ dev_set_name(&dev->core, "ioc0_%02x", ++dev_ioc0_count);
break;
case PS3_DEVICE_TYPE_SB:
dev->core.archdata.dma_ops = &ps3_sb_dma_ops;
- snprintf(dev->core.bus_id, sizeof(dev->core.bus_id),
- "sb_%02x", ++dev_sb_count);
+ dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count);
break;
case PS3_DEVICE_TYPE_VUART:
- snprintf(dev->core.bus_id, sizeof(dev->core.bus_id),
- "vuart_%02x", ++dev_vuart_count);
+ dev_set_name(&dev->core, "vuart_%02x", ++dev_vuart_count);
break;
case PS3_DEVICE_TYPE_LPM:
- snprintf(dev->core.bus_id, sizeof(dev->core.bus_id),
- "lpm_%02x", ++dev_lpm_count);
+ dev_set_name(&dev->core, "lpm_%02x", ++dev_lpm_count);
break;
default:
BUG();
@@ -766,7 +764,7 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
dev->core.archdata.of_node = NULL;
set_dev_node(&dev->core, 0);
- pr_debug("%s:%d add %s\n", __func__, __LINE__, dev->core.bus_id);
+ pr_debug("%s:%d add %s\n", __func__, __LINE__, dev_name(&dev->core));
result = device_register(&dev->core);
return result;
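The uevent and modalias changes above only append the sub-ID to the existing "ps3:<match_id>" string. A minimal illustration, with hypothetical ID values that are not taken from this patch:

	char buf[32];

	/* With match_id = 3 and match_sub_id = 0 the device now reports
	 * "ps3:3:0" instead of "ps3:3", so module aliases can also key
	 * off the sub-ID. */
	snprintf(buf, sizeof(buf), "ps3:%d:%d", 3, 0);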
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 97619fd51e39..ddc2a307cd50 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -54,7 +54,7 @@ config PPC_SMLPAR
config CMM
tristate "Collaborative memory management"
- depends on PPC_SMLPAR
+ depends on PPC_SMLPAR && !CRASH_DUMP
default y
help
Select this option, if you want to enable the kernel interface
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 5cd4d2761620..6567439fe78d 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -28,6 +28,7 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/oom.h>
+#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/stringify.h>
#include <linux/swap.h>
@@ -384,6 +385,26 @@ static void cmm_unregister_sysfs(struct sys_device *sysdev)
}
/**
+ * cmm_reboot_notifier - Make sure pages are not still marked as "loaned"
+ *
+ **/
+static int cmm_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+{
+ if (action == SYS_RESTART) {
+ if (cmm_thread_ptr)
+ kthread_stop(cmm_thread_ptr);
+ cmm_thread_ptr = NULL;
+ cmm_free_pages(loaned_pages);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cmm_reboot_nb = {
+ .notifier_call = cmm_reboot_notifier,
+};
+
+/**
* cmm_init - Module initialization
*
* Return value:
@@ -399,9 +420,12 @@ static int cmm_init(void)
if ((rc = register_oom_notifier(&cmm_oom_nb)) < 0)
return rc;
- if ((rc = cmm_sysfs_register(&cmm_sysdev)))
+ if ((rc = register_reboot_notifier(&cmm_reboot_nb)))
goto out_oom_notifier;
+ if ((rc = cmm_sysfs_register(&cmm_sysdev)))
+ goto out_reboot_notifier;
+
if (cmm_disabled)
return rc;
@@ -415,6 +439,8 @@ static int cmm_init(void)
out_unregister_sysfs:
cmm_unregister_sysfs(&cmm_sysdev);
+out_reboot_notifier:
+ unregister_reboot_notifier(&cmm_reboot_nb);
out_oom_notifier:
unregister_oom_notifier(&cmm_oom_nb);
return rc;
@@ -431,6 +457,7 @@ static void cmm_exit(void)
if (cmm_thread_ptr)
kthread_stop(cmm_thread_ptr);
unregister_oom_notifier(&cmm_oom_nb);
+ unregister_reboot_notifier(&cmm_reboot_nb);
cmm_free_pages(loaned_pages);
cmm_unregister_sysfs(&cmm_sysdev);
}
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 54816d75b578..989d6462c154 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -21,6 +21,8 @@
* Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com>
*/
+#undef DEBUG
+
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
@@ -488,10 +490,8 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
pdn->eeh_mode & EEH_MODE_NOCHECK) {
ignored_check++;
-#ifdef DEBUG
- printk ("EEH:ignored check (%x) for %s %s\n",
- pdn->eeh_mode, pci_name (dev), dn->full_name);
-#endif
+ pr_debug("EEH: Ignored check (%x) for %s %s\n",
+ pdn->eeh_mode, pci_name (dev), dn->full_name);
return 0;
}
@@ -1014,10 +1014,9 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
eeh_subsystem_enabled = 1;
pdn->eeh_mode |= EEH_MODE_SUPPORTED;
-#ifdef DEBUG
- printk(KERN_DEBUG "EEH: %s: eeh enabled, config=%x pe_config=%x\n",
- dn->full_name, pdn->eeh_config_addr, pdn->eeh_pe_config_addr);
-#endif
+ pr_debug("EEH: %s: eeh enabled, config=%x pe_config=%x\n",
+ dn->full_name, pdn->eeh_config_addr,
+ pdn->eeh_pe_config_addr);
} else {
/* This device doesn't support EEH, but it may have an
@@ -1161,13 +1160,17 @@ static void eeh_add_device_late(struct pci_dev *dev)
if (!dev || !eeh_subsystem_enabled)
return;
-#ifdef DEBUG
- printk(KERN_DEBUG "EEH: adding device %s\n", pci_name(dev));
-#endif
+ pr_debug("EEH: Adding device %s\n", pci_name(dev));
- pci_dev_get (dev);
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
+ if (pdn->pcidev == dev) {
+ pr_debug("EEH: Already referenced !\n");
+ return;
+ }
+ WARN_ON(pdn->pcidev);
+
+ pci_dev_get (dev);
pdn->pcidev = dev;
pci_addr_cache_insert_device(dev);
@@ -1206,17 +1209,18 @@ static void eeh_remove_device(struct pci_dev *dev)
return;
/* Unregister the device with the EEH/PCI address search system */
-#ifdef DEBUG
- printk(KERN_DEBUG "EEH: remove device %s\n", pci_name(dev));
-#endif
- pci_addr_cache_remove_device(dev);
- eeh_sysfs_remove_device(dev);
+ pr_debug("EEH: Removing device %s\n", pci_name(dev));
dn = pci_device_to_OF_node(dev);
- if (PCI_DN(dn)->pcidev) {
- PCI_DN(dn)->pcidev = NULL;
- pci_dev_put (dev);
+ if (PCI_DN(dn)->pcidev == NULL) {
+ pr_debug("EEH: Not referenced !\n");
+ return;
}
+ PCI_DN(dn)->pcidev = NULL;
+ pci_dev_put (dev);
+
+ pci_addr_cache_remove_device(dev);
+ eeh_sysfs_remove_device(dev);
}
void eeh_remove_bus_device(struct pci_dev *dev)
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 1f032483c026..a20ead87153d 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -116,7 +116,7 @@ static void pseries_cpu_die(unsigned int cpu)
cpu_status = query_cpu_stopped(pcpu);
if (cpu_status == 0 || cpu_status == -1)
break;
- msleep(200);
+ cpu_relax();
}
if (cpu_status != 0) {
printk("Querying DEAD? cpu %i (%i) shows %i\n",
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 7190493e9bdc..5e1ed3d60ee5 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -25,6 +25,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#undef DEBUG
+
#include <linux/pci.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
@@ -69,74 +71,25 @@ EXPORT_SYMBOL_GPL(pcibios_find_pci_bus);
* Remove all of the PCI devices under this bus both from the
* linux pci device tree, and from the powerpc EEH address cache.
*/
-void
-pcibios_remove_pci_devices(struct pci_bus *bus)
+void pcibios_remove_pci_devices(struct pci_bus *bus)
{
- struct pci_dev *dev, *tmp;
+ struct pci_dev *dev, *tmp;
+ struct pci_bus *child_bus;
+
+ /* First go down child busses */
+ list_for_each_entry(child_bus, &bus->children, node)
+ pcibios_remove_pci_devices(child_bus);
+ pr_debug("PCI: Removing devices on bus %04x:%02x\n",
+ pci_domain_nr(bus), bus->number);
list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
+ pr_debug(" * Removing %s...\n", pci_name(dev));
eeh_remove_bus_device(dev);
- pci_remove_bus_device(dev);
- }
+ pci_remove_bus_device(dev);
+ }
}
EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
-/* Must be called before pci_bus_add_devices */
-void
-pcibios_fixup_new_pci_devices(struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- /* Skip already-added devices */
- if (!dev->is_added) {
- int i;
-
- /* Fill device archdata and setup iommu table */
- pcibios_setup_new_device(dev);
-
- pci_read_irq_line(dev);
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- struct resource *r = &dev->resource[i];
-
- if (r->parent || !r->start || !r->flags)
- continue;
- pci_claim_resource(dev, i);
- }
- }
- }
-}
-EXPORT_SYMBOL_GPL(pcibios_fixup_new_pci_devices);
-
-static int
-pcibios_pci_config_bridge(struct pci_dev *dev)
-{
- u8 sec_busno;
- struct pci_bus *child_bus;
-
- /* Get busno of downstream bus */
- pci_read_config_byte(dev, PCI_SECONDARY_BUS, &sec_busno);
-
- /* Add to children of PCI bridge dev->bus */
- child_bus = pci_add_new_bus(dev->bus, dev, sec_busno);
- if (!child_bus) {
- printk (KERN_ERR "%s: could not add second bus\n", __func__);
- return -EIO;
- }
- sprintf(child_bus->name, "PCI Bus #%02x", child_bus->number);
-
- pci_scan_child_bus(child_bus);
-
- /* Fixup new pci devices */
- pcibios_fixup_new_pci_devices(child_bus);
-
- /* Make the discovered devices available */
- pci_bus_add_devices(child_bus);
-
- eeh_add_device_tree_late(child_bus);
- return 0;
-}
-
/**
* pcibios_add_pci_devices - adds new pci devices to bus
*
@@ -147,10 +100,9 @@ pcibios_pci_config_bridge(struct pci_dev *dev)
* is how this routine differs from other, similar pcibios
* routines.)
*/
-void
-pcibios_add_pci_devices(struct pci_bus * bus)
+void pcibios_add_pci_devices(struct pci_bus * bus)
{
- int slotno, num, mode;
+ int slotno, num, mode, pass, max;
struct pci_dev *dev;
struct device_node *dn = pci_bus_to_OF_node(bus);
@@ -162,26 +114,23 @@ pcibios_add_pci_devices(struct pci_bus * bus)
if (mode == PCI_PROBE_DEVTREE) {
/* use ofdt-based probe */
- of_scan_bus(dn, bus);
- if (!list_empty(&bus->devices)) {
- pcibios_fixup_new_pci_devices(bus);
- pci_bus_add_devices(bus);
- eeh_add_device_tree_late(bus);
- }
+ of_rescan_bus(dn, bus);
} else if (mode == PCI_PROBE_NORMAL) {
/* use legacy probe */
slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
- if (num) {
- pcibios_fixup_new_pci_devices(bus);
- pci_bus_add_devices(bus);
- eeh_add_device_tree_late(bus);
+ if (!num)
+ return;
+ pcibios_setup_bus_devices(bus);
+ max = bus->secondary;
+ for (pass=0; pass < 2; pass++)
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
+ dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+ max = pci_scan_bridge(bus, dev, max, pass);
}
-
- list_for_each_entry(dev, &bus->devices, bus_list)
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
- pcibios_pci_config_bridge(dev);
}
+ pcibios_finish_adding_to_bus(bus);
}
EXPORT_SYMBOL_GPL(pcibios_add_pci_devices);
@@ -190,6 +139,8 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
struct pci_controller *phb;
int primary;
+ pr_debug("PCI: Initializing new hotplug PHB %s\n", dn->full_name);
+
primary = list_empty(&hose_list);
phb = pcibios_alloc_controller(dn);
if (!phb)
@@ -203,11 +154,59 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
eeh_add_device_tree_early(dn);
scan_phb(phb);
- pcibios_allocate_bus_resources(phb->bus);
- pcibios_fixup_new_pci_devices(phb->bus);
- pci_bus_add_devices(phb->bus);
- eeh_add_device_tree_late(phb->bus);
+ pcibios_finish_adding_to_bus(phb->bus);
return phb;
}
EXPORT_SYMBOL_GPL(init_phb_dynamic);
+
+/* RPA-specific bits for removing PHBs */
+int remove_phb_dynamic(struct pci_controller *phb)
+{
+ struct pci_bus *b = phb->bus;
+ struct resource *res;
+ int rc, i;
+
+ pr_debug("PCI: Removing PHB %04x:%02x... \n",
+ pci_domain_nr(b), b->number);
+
+ /* We cannot remove a root bus that has children */
+ if (!(list_empty(&b->children) && list_empty(&b->devices)))
+ return -EBUSY;
+
+ /* We -know- there aren't any child devices anymore at this stage
+ * and thus, we can safely unmap the IO space as it's not in use
+ */
+ res = &phb->io_resource;
+ if (res->flags & IORESOURCE_IO) {
+ rc = pcibios_unmap_io_space(b);
+ if (rc) {
+ printk(KERN_ERR "%s: failed to unmap IO on bus %s\n",
+ __func__, b->name);
+ return 1;
+ }
+ }
+
+ /* Unregister the bridge device from sysfs and remove the PCI bus */
+ device_unregister(b->bridge);
+ phb->bus = NULL;
+ pci_remove_bus(b);
+
+ /* Now release the IO resource */
+ if (res->flags & IORESOURCE_IO)
+ release_resource(res);
+
+ /* Release memory resources */
+ for (i = 0; i < 3; ++i) {
+ res = &phb->mem_resources[i];
+ if (!(res->flags & IORESOURCE_MEM))
+ continue;
+ release_resource(res);
+ }
+
+ /* Free pci_controller data structure */
+ pcibios_free_controller(phb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(remove_phb_dynamic);
diff --git a/arch/powerpc/platforms/pseries/phyp_dump.c b/arch/powerpc/platforms/pseries/phyp_dump.c
index edbc012c2ebc..6cf35cd8d0b5 100644
--- a/arch/powerpc/platforms/pseries/phyp_dump.c
+++ b/arch/powerpc/platforms/pseries/phyp_dump.c
@@ -130,6 +130,9 @@ static unsigned long init_dump_header(struct phyp_dump_header *ph)
static void print_dump_header(const struct phyp_dump_header *ph)
{
#ifdef DEBUG
+ if (ph == NULL)
+ return;
+
printk(KERN_INFO "dump header:\n");
/* setup some ph->sections required */
printk(KERN_INFO "version = %d\n", ph->version);
@@ -411,6 +414,8 @@ static int __init phyp_dump_setup(void)
of_node_put(rtas);
}
+ ibm_configure_kernel_dump = rtas_token("ibm,configure-kernel-dump");
+
print_dump_header(dump_header);
dump_area_length = init_dump_header(&phdr);
/* align down */
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c
index f4e55be2eea9..afad9f5ac0ac 100644
--- a/arch/powerpc/platforms/pseries/rtasd.c
+++ b/arch/powerpc/platforms/pseries/rtasd.c
@@ -208,6 +208,7 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
break;
case ERR_TYPE_KERNEL_PANIC:
default:
+ WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
spin_unlock_irqrestore(&rtasd_log_lock, s);
return;
}
@@ -227,6 +228,7 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
/* Check to see if we need to or have stopped logging */
if (fatal || !logging_enabled) {
logging_enabled = 0;
+ WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
spin_unlock_irqrestore(&rtasd_log_lock, s);
return;
}
@@ -249,11 +251,13 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
else
rtas_log_start += 1;
+ WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
spin_unlock_irqrestore(&rtasd_log_lock, s);
wake_up_interruptible(&rtas_log_wait);
break;
case ERR_TYPE_KERNEL_PANIC:
default:
+ WARN_ON_ONCE(!irqs_disabled()); /* @@@ DEBUG @@@ */
spin_unlock_irqrestore(&rtasd_log_lock, s);
return;
}
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index e1904774a70f..84e058f1e1cc 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -332,7 +332,7 @@ static void xics_eoi_lpar(unsigned int virq)
lpar_xirr_info_set((0xff << 24) | irq);
}
-static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
+static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
{
unsigned int irq;
int status;
@@ -579,7 +579,7 @@ static void xics_update_irq_servers(void)
int i, j;
struct device_node *np;
u32 ilen;
- const u32 *ireg, *isize;
+ const u32 *ireg;
u32 hcpuid;
/* Find the server numbers for the boot cpu. */
@@ -607,11 +607,6 @@ static void xics_update_irq_servers(void)
}
}
- /* get the bit size of server numbers */
- isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
- if (isize)
- interrupt_server_size = *isize;
-
of_node_put(np);
}
@@ -682,6 +677,7 @@ void __init xics_init_IRQ(void)
struct device_node *np;
u32 indx = 0;
int found = 0;
+ const u32 *isize;
ppc64_boot_msg(0x20, "XICS Init");
@@ -701,6 +697,26 @@ void __init xics_init_IRQ(void)
if (found == 0)
return;
+ /* get the bit size of server numbers */
+ found = 0;
+
+ for_each_compatible_node(np, NULL, "ibm,ppc-xics") {
+ isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
+
+ if (!isize)
+ continue;
+
+ if (!found) {
+ interrupt_server_size = *isize;
+ found = 1;
+ } else if (*isize != interrupt_server_size) {
+ printk(KERN_WARNING "XICS: "
+ "mismatched ibm,interrupt-server#-size\n");
+ interrupt_server_size = max(*isize,
+ interrupt_server_size);
+ }
+ }
+
xics_update_irq_servers();
xics_init_host();
@@ -728,9 +744,18 @@ static void xics_set_cpu_priority(unsigned char cppr)
/* Have the calling processor join or leave the specified global queue */
static void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
{
- int status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
- (1UL << interrupt_server_size) - 1 - gserver, join);
- WARN_ON(status < 0);
+ int index;
+ int status;
+
+ if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
+ return;
+
+ index = (1UL << interrupt_server_size) - 1 - gserver;
+
+ status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);
+
+ WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
+ GLOBAL_INTERRUPT_QUEUE, index, join, status);
}
void xics_setup_cpu(void)
@@ -845,7 +870,7 @@ void xics_migrate_irqs_away(void)
/* Reset affinity to all cpus */
irq_desc[virq].affinity = CPU_MASK_ALL;
- desc->chip->set_affinity(virq, CPU_MASK_ALL);
+ desc->chip->set_affinity(virq, cpu_all_mask);
unlock:
spin_unlock_irqrestore(&desc->lock, flags);
}
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 5afce115ab1f..b33b28a6fe12 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_FSL_PCI) += fsl_pci.o $(fsl-msi-obj-y)
obj-$(CONFIG_FSL_LBC) += fsl_lbc.o
obj-$(CONFIG_FSL_GTM) += fsl_gtm.o
obj-$(CONFIG_MPC8xxx_GPIO) += mpc8xxx_gpio.o
+obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o
obj-$(CONFIG_RAPIDIO) += fsl_rio.o
obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
obj-$(CONFIG_QUICC_ENGINE) += qe_lib/
diff --git a/arch/powerpc/sysdev/bestcomm/ata.c b/arch/powerpc/sysdev/bestcomm/ata.c
index 1f5258fb38c3..901c9f91e5dd 100644
--- a/arch/powerpc/sysdev/bestcomm/ata.c
+++ b/arch/powerpc/sysdev/bestcomm/ata.c
@@ -61,6 +61,9 @@ bcom_ata_init(int queue_len, int maxbufsize)
struct bcom_ata_var *var;
struct bcom_ata_inc *inc;
+ /* Prefetch breaks ATA DMA, so turn it off whenever ATA DMA is used */
+ bcom_disable_prefetch();
+
tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0);
if (!tsk)
return NULL;
diff --git a/arch/powerpc/sysdev/bestcomm/ata.h b/arch/powerpc/sysdev/bestcomm/ata.h
index 10982769c465..0b2371811334 100644
--- a/arch/powerpc/sysdev/bestcomm/ata.h
+++ b/arch/powerpc/sysdev/bestcomm/ata.h
@@ -16,22 +16,15 @@
struct bcom_ata_bd {
u32 status;
- u32 dst_pa;
u32 src_pa;
+ u32 dst_pa;
};
-extern struct bcom_task *
-bcom_ata_init(int queue_len, int maxbufsize);
-
-extern void
-bcom_ata_rx_prepare(struct bcom_task *tsk);
-
-extern void
-bcom_ata_tx_prepare(struct bcom_task *tsk);
-
-extern void
-bcom_ata_reset_bd(struct bcom_task *tsk);
-
+extern struct bcom_task *bcom_ata_init(int queue_len, int maxbufsize);
+extern void bcom_ata_rx_prepare(struct bcom_task *tsk);
+extern void bcom_ata_tx_prepare(struct bcom_task *tsk);
+extern void bcom_ata_reset_bd(struct bcom_task *tsk);
+extern void bcom_ata_release(struct bcom_task *tsk);
#endif /* __BESTCOMM_ATA_H__ */
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c
index 446c9ea85b30..378ebd9aac18 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.c
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c
@@ -279,7 +279,6 @@ bcom_engine_init(void)
int task;
phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa;
unsigned int tdt_size, ctx_size, var_size, fdt_size;
- u16 regval;
/* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */
tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt);
@@ -331,10 +330,8 @@ bcom_engine_init(void)
out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS);
/* Disable COMM Bus Prefetch on the original 5200; it's broken */
- if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR) {
- regval = in_be16(&bcom_eng->regs->PtdCntrl);
- out_be16(&bcom_eng->regs->PtdCntrl, regval | 1);
- }
+ if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR)
+ bcom_disable_prefetch();
/* Init lock */
spin_lock_init(&bcom_eng->lock);
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.h b/arch/powerpc/sysdev/bestcomm/bestcomm.h
index c960a8b49655..23a95f80dfdb 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.h
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.h
@@ -16,8 +16,19 @@
#ifndef __BESTCOMM_H__
#define __BESTCOMM_H__
-struct bcom_bd; /* defined later on ... */
-
+/**
+ * struct bcom_bd - Structure describing a generic BestComm buffer descriptor
+ * @status: The current status of this buffer. Exact meaning depends on the
+ * task type.
+ * @data: An array of u32 extra data. The size of the array is task-dependent.
+ *
+ * Note: Don't dereference a bcom_bd pointer as an array. The size of the
+ * bcom_bd is variable. Use bcom_get_bd() instead.
+ */
+struct bcom_bd {
+ u32 status;
+ u32 data[0]; /* variable payload size */
+};
/* ======================================================================== */
/* Generic task management */
@@ -84,17 +95,6 @@ bcom_get_task_irq(struct bcom_task *tsk) {
/* BD based tasks helpers */
/* ======================================================================== */
-/**
- * struct bcom_bd - Structure describing a generic BestComm buffer descriptor
- * @status: The current status of this buffer. Exact meaning depends on the
- * task type
- * @data: An array of u32 whose meaning depends on the task type.
- */
-struct bcom_bd {
- u32 status;
- u32 data[1]; /* variable, but at least 1 */
-};
-
#define BCOM_BD_READY 0x40000000ul
/** _bcom_next_index - Get next input index.
@@ -140,15 +140,31 @@ bcom_queue_full(struct bcom_task *tsk)
}
/**
+ * bcom_get_bd - Get a BD from the queue
+ * @tsk: The BestComm task structure
+ * @index: Index of the BD to fetch
+ */
+static inline struct bcom_bd *
+bcom_get_bd(struct bcom_task *tsk, unsigned int index)
+{
+ /* A cast to (void*) so the address can be incremented by the
+ * real size instead of by sizeof(struct bcom_bd) */
+ return ((void *)tsk->bd) + (index * tsk->bd_size);
+}
+
+/**
* bcom_buffer_done - Checks if a BestComm buffer transfer is complete
* @tsk: The BestComm task structure
*/
static inline int
bcom_buffer_done(struct bcom_task *tsk)
{
+ struct bcom_bd *bd;
if (bcom_queue_empty(tsk))
return 0;
- return !(tsk->bd[tsk->outdex].status & BCOM_BD_READY);
+
+ bd = bcom_get_bd(tsk, tsk->outdex);
+ return !(bd->status & BCOM_BD_READY);
}
/**
@@ -160,16 +176,21 @@ bcom_buffer_done(struct bcom_task *tsk)
static inline struct bcom_bd *
bcom_prepare_next_buffer(struct bcom_task *tsk)
{
- tsk->bd[tsk->index].status = 0; /* cleanup last status */
- return &tsk->bd[tsk->index];
+ struct bcom_bd *bd;
+
+ bd = bcom_get_bd(tsk, tsk->index);
+ bd->status = 0; /* cleanup last status */
+ return bd;
}
static inline void
bcom_submit_next_buffer(struct bcom_task *tsk, void *cookie)
{
+ struct bcom_bd *bd = bcom_get_bd(tsk, tsk->index);
+
tsk->cookie[tsk->index] = cookie;
mb(); /* ensure the bd is really up-to-date */
- tsk->bd[tsk->index].status |= BCOM_BD_READY;
+ bd->status |= BCOM_BD_READY;
tsk->index = _bcom_next_index(tsk);
if (tsk->flags & BCOM_FLAGS_ENABLE_TASK)
bcom_enable(tsk);
@@ -179,10 +200,12 @@ static inline void *
bcom_retrieve_buffer(struct bcom_task *tsk, u32 *p_status, struct bcom_bd **p_bd)
{
void *cookie = tsk->cookie[tsk->outdex];
+ struct bcom_bd *bd = bcom_get_bd(tsk, tsk->outdex);
+
if (p_status)
- *p_status = tsk->bd[tsk->outdex].status;
+ *p_status = bd->status;
if (p_bd)
- *p_bd = &tsk->bd[tsk->outdex];
+ *p_bd = bd;
tsk->outdex = _bcom_next_outdex(tsk);
return cookie;
}
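With bcom_bd now variable-sized, a descriptor must always be reached through bcom_get_bd() or the helpers above, never by indexing tsk->bd directly. A minimal client sketch, assuming a task obtained from one of the bcom_*_init() helpers; the data[] layout and the bd_pa/cookie names are illustrative, not part of this patch:

	/* Producer: queue one buffer on the task */
	if (!bcom_queue_full(tsk)) {
		struct bcom_bd *bd = bcom_prepare_next_buffer(tsk);

		bd->data[0] = bd_pa;		/* layout is task-specific */
		bcom_submit_next_buffer(tsk, cookie);
	}

	/* Consumer: reap completed buffers */
	while (bcom_buffer_done(tsk)) {
		u32 status;
		void *done = bcom_retrieve_buffer(tsk, &status, NULL);
		/* process "done", inspect status bits as needed */
	}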
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
index 866a2915ef2f..eb0d1c883c31 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
@@ -198,8 +198,8 @@ struct bcom_task_header {
#define BCOM_IPR_SCTMR_1 2
#define BCOM_IPR_FEC_RX 6
#define BCOM_IPR_FEC_TX 5
-#define BCOM_IPR_ATA_RX 4
-#define BCOM_IPR_ATA_TX 3
+#define BCOM_IPR_ATA_RX 7
+#define BCOM_IPR_ATA_TX 7
#define BCOM_IPR_SCPCI_RX 2
#define BCOM_IPR_SCPCI_TX 2
#define BCOM_IPR_PSC3_RX 2
@@ -241,6 +241,22 @@ extern void bcom_set_initiator(int task, int initiator);
#define TASK_ENABLE 0x8000
+/**
+ * bcom_disable_prefetch - Hook to disable bus prefetching
+ *
+ * ATA DMA and the original MPC5200 need this due to silicon bugs. At the
+ * moment disabling prefetch is a one-way street. There is no mechanism
+ * in place to turn prefetch back on after it has been disabled. There is
+ * no reason it couldn't be done; it would just be more complex to implement.
+ */
+static inline void bcom_disable_prefetch(void)
+{
+ u16 regval;
+
+ regval = in_be16(&bcom_eng->regs->PtdCntrl);
+ out_be16(&bcom_eng->regs->PtdCntrl, regval | 1);
+}
+
static inline void
bcom_enable_task(int task)
{
diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S
index 2078f39e2f17..d3098ef1404a 100644
--- a/arch/powerpc/sysdev/dcr-low.S
+++ b/arch/powerpc/sysdev/dcr-low.S
@@ -11,14 +11,20 @@
#include <asm/ppc_asm.h>
#include <asm/processor.h>
+#include <asm/bug.h>
#define DCR_ACCESS_PROLOG(table) \
+ cmpli cr0,r3,1024; \
rlwinm r3,r3,4,18,27; \
lis r5,table@h; \
ori r5,r5,table@l; \
add r3,r3,r5; \
+ bge- 1f; \
mtctr r3; \
- bctr
+ bctr; \
+1: trap; \
+ EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0; \
+ blr
_GLOBAL(__mfdcr)
DCR_ACCESS_PROLOG(__mfdcr_table)
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
index a8ba9983dd5a..bb44aa9fd470 100644
--- a/arch/powerpc/sysdev/dcr.c
+++ b/arch/powerpc/sysdev/dcr.c
@@ -124,7 +124,8 @@ EXPORT_SYMBOL_GPL(dcr_write_generic);
#endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */
-unsigned int dcr_resource_start(struct device_node *np, unsigned int index)
+unsigned int dcr_resource_start(const struct device_node *np,
+ unsigned int index)
{
unsigned int ds;
const u32 *dr = of_get_property(np, "dcr-reg", &ds);
@@ -136,7 +137,7 @@ unsigned int dcr_resource_start(struct device_node *np, unsigned int index)
}
EXPORT_SYMBOL_GPL(dcr_resource_start);
-unsigned int dcr_resource_len(struct device_node *np, unsigned int index)
+unsigned int dcr_resource_len(const struct device_node *np, unsigned int index)
{
unsigned int ds;
const u32 *dr = of_get_property(np, "dcr-reg", &ds);
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 5b264eb4b1f7..f611d0369cc8 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -29,7 +29,8 @@
#if defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_86xx)
/* atmu setup for fsl pci/pcie controller */
-void __init setup_pci_atmu(struct pci_controller *hose, struct resource *rsrc)
+static void __init setup_pci_atmu(struct pci_controller *hose,
+ struct resource *rsrc)
{
struct ccsr_pci __iomem *pci;
int i;
@@ -86,7 +87,7 @@ void __init setup_pci_atmu(struct pci_controller *hose, struct resource *rsrc)
out_be32(&pci->piw[2].piwar, PIWAR_2G);
}
-void __init setup_pci_cmd(struct pci_controller *hose)
+static void __init setup_pci_cmd(struct pci_controller *hose)
{
u16 cmd;
int cap_x;
@@ -130,7 +131,7 @@ static void __init quirk_fsl_pcie_header(struct pci_dev *dev)
return ;
}
-int __init fsl_pcie_check_link(struct pci_controller *hose)
+static int __init fsl_pcie_check_link(struct pci_controller *hose)
{
u32 val;
early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
@@ -187,7 +188,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
printk(KERN_WARNING "Can't get bus-range for %s, assume"
" bus 0\n", dev->full_name);
- ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
hose = pcibios_alloc_controller(dev);
if (!hose)
return -ENOMEM;
@@ -300,7 +301,7 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
" bus 0\n", dev->full_name);
}
- ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
hose = pcibios_alloc_controller(dev);
if (!hose)
return -ENOMEM;
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 26ecb96f9731..115cb16351fd 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -207,236 +207,51 @@ static int __init of_add_fixed_phys(void)
arch_initcall(of_add_fixed_phys);
#endif /* CONFIG_FIXED_PHY */
-static int gfar_mdio_of_init_one(struct device_node *np)
-{
- int k;
- struct device_node *child = NULL;
- struct gianfar_mdio_data mdio_data;
- struct platform_device *mdio_dev;
- struct resource res;
- int ret;
-
- memset(&res, 0, sizeof(res));
- memset(&mdio_data, 0, sizeof(mdio_data));
-
- ret = of_address_to_resource(np, 0, &res);
- if (ret)
- return ret;
-
- /* The gianfar device will try to use the same ID created below to find
- * this bus, to coordinate register access (since they share). */
- mdio_dev = platform_device_register_simple("fsl-gianfar_mdio",
- res.start&0xfffff, &res, 1);
- if (IS_ERR(mdio_dev))
- return PTR_ERR(mdio_dev);
-
- for (k = 0; k < 32; k++)
- mdio_data.irq[k] = PHY_POLL;
-
- while ((child = of_get_next_child(np, child)) != NULL) {
- int irq = irq_of_parse_and_map(child, 0);
- if (irq != NO_IRQ) {
- const u32 *id = of_get_property(child, "reg", NULL);
- mdio_data.irq[*id] = irq;
- }
- }
-
- ret = platform_device_add_data(mdio_dev, &mdio_data,
- sizeof(struct gianfar_mdio_data));
- if (ret)
- platform_device_unregister(mdio_dev);
-
- return ret;
-}
-
-static int __init gfar_mdio_of_init(void)
-{
- struct device_node *np = NULL;
-
- for_each_compatible_node(np, NULL, "fsl,gianfar-mdio")
- gfar_mdio_of_init_one(np);
-
- /* try the deprecated version */
- for_each_compatible_node(np, "mdio", "gianfar");
- gfar_mdio_of_init_one(np);
-
- return 0;
-}
-
-arch_initcall(gfar_mdio_of_init);
-
-static const char *gfar_tx_intr = "tx";
-static const char *gfar_rx_intr = "rx";
-static const char *gfar_err_intr = "error";
-
-static int __init gfar_of_init(void)
+#ifdef CONFIG_PPC_83xx
+static int __init mpc83xx_wdt_init(void)
{
+ struct resource r;
struct device_node *np;
- unsigned int i;
- struct platform_device *gfar_dev;
- struct resource res;
+ struct platform_device *dev;
+ u32 freq = fsl_get_sys_freq();
int ret;
- for (np = NULL, i = 0;
- (np = of_find_compatible_node(np, "network", "gianfar")) != NULL;
- i++) {
- struct resource r[4];
- struct device_node *phy, *mdio;
- struct gianfar_platform_data gfar_data;
- const unsigned int *id;
- const char *model;
- const char *ctype;
- const void *mac_addr;
- const phandle *ph;
- int n_res = 2;
-
- if (!of_device_is_available(np))
- continue;
-
- memset(r, 0, sizeof(r));
- memset(&gfar_data, 0, sizeof(gfar_data));
-
- ret = of_address_to_resource(np, 0, &r[0]);
- if (ret)
- goto err;
-
- of_irq_to_resource(np, 0, &r[1]);
-
- model = of_get_property(np, "model", NULL);
-
- /* If we aren't the FEC we have multiple interrupts */
- if (model && strcasecmp(model, "FEC")) {
- r[1].name = gfar_tx_intr;
-
- r[2].name = gfar_rx_intr;
- of_irq_to_resource(np, 1, &r[2]);
+ np = of_find_compatible_node(NULL, "watchdog", "mpc83xx_wdt");
- r[3].name = gfar_err_intr;
- of_irq_to_resource(np, 2, &r[3]);
-
- n_res += 2;
- }
-
- gfar_dev =
- platform_device_register_simple("fsl-gianfar", i, &r[0],
- n_res);
-
- if (IS_ERR(gfar_dev)) {
- ret = PTR_ERR(gfar_dev);
- goto err;
- }
-
- mac_addr = of_get_mac_address(np);
- if (mac_addr)
- memcpy(gfar_data.mac_addr, mac_addr, 6);
-
- if (model && !strcasecmp(model, "TSEC"))
- gfar_data.device_flags =
- FSL_GIANFAR_DEV_HAS_GIGABIT |
- FSL_GIANFAR_DEV_HAS_COALESCE |
- FSL_GIANFAR_DEV_HAS_RMON |
- FSL_GIANFAR_DEV_HAS_MULTI_INTR;
- if (model && !strcasecmp(model, "eTSEC"))
- gfar_data.device_flags =
- FSL_GIANFAR_DEV_HAS_GIGABIT |
- FSL_GIANFAR_DEV_HAS_COALESCE |
- FSL_GIANFAR_DEV_HAS_RMON |
- FSL_GIANFAR_DEV_HAS_MULTI_INTR |
- FSL_GIANFAR_DEV_HAS_CSUM |
- FSL_GIANFAR_DEV_HAS_VLAN |
- FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
-
- ctype = of_get_property(np, "phy-connection-type", NULL);
-
- /* We only care about rgmii-id. The rest are autodetected */
- if (ctype && !strcmp(ctype, "rgmii-id"))
- gfar_data.interface = PHY_INTERFACE_MODE_RGMII_ID;
- else
- gfar_data.interface = PHY_INTERFACE_MODE_MII;
-
- if (of_get_property(np, "fsl,magic-packet", NULL))
- gfar_data.device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
-
- ph = of_get_property(np, "phy-handle", NULL);
- if (ph == NULL) {
- u32 *fixed_link;
-
- fixed_link = (u32 *)of_get_property(np, "fixed-link",
- NULL);
- if (!fixed_link) {
- ret = -ENODEV;
- goto unreg;
- }
-
- snprintf(gfar_data.bus_id, MII_BUS_ID_SIZE, "0");
- gfar_data.phy_id = fixed_link[0];
- } else {
- phy = of_find_node_by_phandle(*ph);
-
- if (phy == NULL) {
- ret = -ENODEV;
- goto unreg;
- }
-
- mdio = of_get_parent(phy);
-
- id = of_get_property(phy, "reg", NULL);
- ret = of_address_to_resource(mdio, 0, &res);
- if (ret) {
- of_node_put(phy);
- of_node_put(mdio);
- goto unreg;
- }
-
- gfar_data.phy_id = *id;
- snprintf(gfar_data.bus_id, MII_BUS_ID_SIZE, "%llx",
- (unsigned long long)res.start&0xfffff);
+ if (!np) {
+ ret = -ENODEV;
+ goto nodev;
+ }
- of_node_put(phy);
- of_node_put(mdio);
- }
+ memset(&r, 0, sizeof(r));
- /* Get MDIO bus controlled by this eTSEC, if any. Normally only
- * eTSEC 1 will control an MDIO bus, not necessarily the same
- * bus that its PHY is on ('mdio' above), so we can't just use
- * that. What we do is look for a gianfar mdio device that has
- * overlapping registers with this device. That's really the
- * whole point, to find the device sharing our registers to
- * coordinate access with it.
- */
- for_each_compatible_node(mdio, NULL, "fsl,gianfar-mdio") {
- if (of_address_to_resource(mdio, 0, &res))
- continue;
-
- if (res.start >= r[0].start && res.end <= r[0].end) {
- /* Get the ID the mdio bus platform device was
- * registered with. gfar_data.bus_id is
- * different because it's for finding a PHY,
- * while this is for finding a MII bus.
- */
- gfar_data.mdio_bus = res.start&0xfffff;
- of_node_put(mdio);
- break;
- }
- }
+ ret = of_address_to_resource(np, 0, &r);
+ if (ret)
+ goto err;
- ret =
- platform_device_add_data(gfar_dev, &gfar_data,
- sizeof(struct
- gianfar_platform_data));
- if (ret)
- goto unreg;
+ dev = platform_device_register_simple("mpc83xx_wdt", 0, &r, 1);
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
+ goto err;
}
+ ret = platform_device_add_data(dev, &freq, sizeof(freq));
+ if (ret)
+ goto unreg;
+
+ of_node_put(np);
return 0;
unreg:
- platform_device_unregister(gfar_dev);
+ platform_device_unregister(dev);
err:
+ of_node_put(np);
+nodev:
return ret;
}
-arch_initcall(gfar_of_init);
+arch_initcall(mpc83xx_wdt_init);
+#endif
static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type)
{
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index 60f7f227327c..9c744e4285a0 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -5,8 +5,13 @@
#include <asm/mmu.h>
extern phys_addr_t get_immrbase(void);
+#if defined(CONFIG_CPM2) || defined(CONFIG_QUICC_ENGINE) || defined(CONFIG_8xx)
extern u32 get_brgfreq(void);
extern u32 get_baudrate(void);
+#else
+static inline u32 get_brgfreq(void) { return -1; }
+static inline u32 get_baudrate(void) { return -1; }
+#endif
extern u32 fsl_get_sys_freq(void);
struct spi_board_info;
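The inline stubs above let callers compile on SoCs without CPM/QE support and detect the missing clock at run time. A hedged sketch of the expected caller pattern (the -ENODEV handling is illustrative):

	u32 brg = get_brgfreq();

	if (brg == -1)		/* stub value: no BRG clock on this SoC */
		return -ENODEV;
	/* otherwise brg holds the BRG input frequency in Hz */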
diff --git a/arch/powerpc/sysdev/grackle.c b/arch/powerpc/sysdev/grackle.c
index d502927644c6..5da37c2f22ee 100644
--- a/arch/powerpc/sysdev/grackle.c
+++ b/arch/powerpc/sysdev/grackle.c
@@ -57,7 +57,7 @@ void __init setup_grackle(struct pci_controller *hose)
{
setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0);
if (machine_is_compatible("PowerMac1,1"))
- ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
if (machine_is_compatible("AAPL,PowerBook1998"))
grackle_set_loop_snoop(hose, 1);
#if 0 /* Disabled for now, HW problems ??? */
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 1890fb085cde..3e0d89dcdba2 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -661,17 +661,6 @@ static inline void mpic_eoi(struct mpic *mpic)
(void)mpic_cpu_read(MPIC_INFO(CPU_WHOAMI));
}
-#ifdef CONFIG_SMP
-static irqreturn_t mpic_ipi_action(int irq, void *data)
-{
- long ipi = (long)data;
-
- smp_message_recv(ipi);
-
- return IRQ_HANDLED;
-}
-#endif /* CONFIG_SMP */
-
/*
* Linux descriptor level callbacks
*/
@@ -817,7 +806,7 @@ static void mpic_end_ipi(unsigned int irq)
#endif /* CONFIG_SMP */
-void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
+void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
struct mpic *mpic = mpic_from_irq(irq);
unsigned int src = mpic_irq_to_hw(irq);
@@ -829,7 +818,7 @@ void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
} else {
cpumask_t tmp;
- cpus_and(tmp, cpumask, cpu_online_map);
+ cpumask_and(&tmp, cpumask, cpu_online_mask);
mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
mpic_physmask(cpus_addr(tmp)[0]));
@@ -1548,13 +1537,7 @@ unsigned int mpic_get_mcirq(void)
void mpic_request_ipis(void)
{
struct mpic *mpic = mpic_primary;
- long i, err;
- static char *ipi_names[] = {
- "IPI0 (call function)",
- "IPI1 (reschedule)",
- "IPI2 (call function single)",
- "IPI3 (debugger break)",
- };
+ int i;
BUG_ON(mpic == NULL);
printk(KERN_INFO "mpic: requesting IPIs ... \n");
@@ -1563,17 +1546,10 @@ void mpic_request_ipis(void)
unsigned int vipi = irq_create_mapping(mpic->irqhost,
mpic->ipi_vecs[0] + i);
if (vipi == NO_IRQ) {
- printk(KERN_ERR "Failed to map IPI %ld\n", i);
- break;
- }
- err = request_irq(vipi, mpic_ipi_action,
- IRQF_DISABLED|IRQF_PERCPU,
- ipi_names[i], (void *)i);
- if (err) {
- printk(KERN_ERR "Request of irq %d for IPI %ld failed\n",
- vipi, i);
- break;
+ printk(KERN_ERR "Failed to map %s\n", smp_ipi_name[i]);
+ continue;
}
+ smp_request_message_ipi(vipi, i);
}
}
diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h
index 6209c62a426d..3cef2af10f42 100644
--- a/arch/powerpc/sysdev/mpic.h
+++ b/arch/powerpc/sysdev/mpic.h
@@ -36,6 +36,6 @@ static inline int mpic_pasemi_msi_init(struct mpic *mpic)
extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type);
extern void mpic_set_vector(unsigned int virq, unsigned int vector);
-extern void mpic_set_affinity(unsigned int irq, cpumask_t cpumask);
+extern void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask);
#endif /* _POWERPC_SYSDEV_MPIC_H */
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index d3e4d61030b5..77fae5f64f2e 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -194,11 +194,41 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
* 4xx PCI 2.x part
*/
+static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose,
+ void __iomem *reg,
+ u64 plb_addr,
+ u64 pci_addr,
+ u64 size,
+ unsigned int flags,
+ int index)
+{
+ u32 ma, pcila, pciha;
+
+ if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
+ size < 0x1000 || (plb_addr & (size - 1)) != 0) {
+ printk(KERN_WARNING "%s: Resource out of range\n",
+ hose->dn->full_name);
+ return -1;
+ }
+ ma = (0xffffffffu << ilog2(size)) | 1;
+ if (flags & IORESOURCE_PREFETCH)
+ ma |= 2;
+
+ pciha = RES_TO_U32_HIGH(pci_addr);
+ pcila = RES_TO_U32_LOW(pci_addr);
+
+ writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index));
+ writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index));
+ writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index));
+ writel(ma, reg + PCIL0_PMM0MA + (0x10 * index));
+
+ return 0;
+}
+
static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
void __iomem *reg)
{
- u32 la, ma, pcila, pciha;
- int i, j;
+ int i, j, found_isa_hole = 0;
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
@@ -213,28 +243,29 @@ static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
break;
}
- /* Calculate register values */
- la = res->start;
- pciha = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset);
- pcila = RES_TO_U32_LOW(res->start - hose->pci_mem_offset);
-
- ma = res->end + 1 - res->start;
- if (!is_power_of_2(ma) || ma < 0x1000 || ma > 0xffffffffu) {
- printk(KERN_WARNING "%s: Resource out of range\n",
- hose->dn->full_name);
- continue;
+ /* Configure the resource */
+ if (ppc4xx_setup_one_pci_PMM(hose, reg,
+ res->start,
+ res->start - hose->pci_mem_offset,
+ res->end + 1 - res->start,
+ res->flags,
+ j) == 0) {
+ j++;
+
+ /* If the resource PCI address is 0 then we have our
+ * ISA memory hole
+ */
+ if (res->start == hose->pci_mem_offset)
+ found_isa_hole = 1;
}
- ma = (0xffffffffu << ilog2(ma)) | 0x1;
- if (res->flags & IORESOURCE_PREFETCH)
- ma |= 0x2;
-
- /* Program register values */
- writel(la, reg + PCIL0_PMM0LA + (0x10 * j));
- writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * j));
- writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * j));
- writel(ma, reg + PCIL0_PMM0MA + (0x10 * j));
- j++;
}
+
+ /* Handle ISA memory hole if not already covered */
+ if (j <= 2 && !found_isa_hole && hose->isa_mem_size)
+ if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0,
+ hose->isa_mem_size, 0, j) == 0)
+ printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
+ hose->dn->full_name);
}
static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
@@ -352,11 +383,52 @@ static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
* 4xx PCI-X part
*/
+static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller *hose,
+ void __iomem *reg,
+ u64 plb_addr,
+ u64 pci_addr,
+ u64 size,
+ unsigned int flags,
+ int index)
+{
+ u32 lah, lal, pciah, pcial, sa;
+
+ if (!is_power_of_2(size) || size < 0x1000 ||
+ (plb_addr & (size - 1)) != 0) {
+ printk(KERN_WARNING "%s: Resource out of range\n",
+ hose->dn->full_name);
+ return -1;
+ }
+
+ /* Calculate register values */
+ lah = RES_TO_U32_HIGH(plb_addr);
+ lal = RES_TO_U32_LOW(plb_addr);
+ pciah = RES_TO_U32_HIGH(pci_addr);
+ pcial = RES_TO_U32_LOW(pci_addr);
+ sa = (0xffffffffu << ilog2(size)) | 0x1;
+
+ /* Program register values */
+ if (index == 0) {
+ writel(lah, reg + PCIX0_POM0LAH);
+ writel(lal, reg + PCIX0_POM0LAL);
+ writel(pciah, reg + PCIX0_POM0PCIAH);
+ writel(pcial, reg + PCIX0_POM0PCIAL);
+ writel(sa, reg + PCIX0_POM0SA);
+ } else {
+ writel(lah, reg + PCIX0_POM1LAH);
+ writel(lal, reg + PCIX0_POM1LAL);
+ writel(pciah, reg + PCIX0_POM1PCIAH);
+ writel(pcial, reg + PCIX0_POM1PCIAL);
+ writel(sa, reg + PCIX0_POM1SA);
+ }
+
+ return 0;
+}
+
static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
void __iomem *reg)
{
- u32 lah, lal, pciah, pcial, sa;
- int i, j;
+ int i, j, found_isa_hole = 0;
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
@@ -371,36 +443,29 @@ static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
break;
}
- /* Calculate register values */
- lah = RES_TO_U32_HIGH(res->start);
- lal = RES_TO_U32_LOW(res->start);
- pciah = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset);
- pcial = RES_TO_U32_LOW(res->start - hose->pci_mem_offset);
- sa = res->end + 1 - res->start;
- if (!is_power_of_2(sa) || sa < 0x100000 ||
- sa > 0xffffffffu) {
- printk(KERN_WARNING "%s: Resource out of range\n",
- hose->dn->full_name);
- continue;
+ /* Configure the resource */
+ if (ppc4xx_setup_one_pcix_POM(hose, reg,
+ res->start,
+ res->start - hose->pci_mem_offset,
+ res->end + 1 - res->start,
+ res->flags,
+ j) == 0) {
+ j++;
+
+ /* If the resource PCI address is 0 then we have our
+ * ISA memory hole
+ */
+ if (res->start == hose->pci_mem_offset)
+ found_isa_hole = 1;
}
- sa = (0xffffffffu << ilog2(sa)) | 0x1;
-
- /* Program register values */
- if (j == 0) {
- writel(lah, reg + PCIX0_POM0LAH);
- writel(lal, reg + PCIX0_POM0LAL);
- writel(pciah, reg + PCIX0_POM0PCIAH);
- writel(pcial, reg + PCIX0_POM0PCIAL);
- writel(sa, reg + PCIX0_POM0SA);
- } else {
- writel(lah, reg + PCIX0_POM1LAH);
- writel(lal, reg + PCIX0_POM1LAL);
- writel(pciah, reg + PCIX0_POM1PCIAH);
- writel(pcial, reg + PCIX0_POM1PCIAL);
- writel(sa, reg + PCIX0_POM1SA);
- }
- j++;
}
+
+ /* Handle ISA memory hole if not already covered */
+ if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
+ if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0,
+ hose->isa_mem_size, 0, j) == 0)
+ printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
+ hose->dn->full_name);
}
static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
@@ -1317,12 +1382,72 @@ static struct pci_ops ppc4xx_pciex_pci_ops =
.write = ppc4xx_pciex_write_config,
};
+static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port,
+ struct pci_controller *hose,
+ void __iomem *mbase,
+ u64 plb_addr,
+ u64 pci_addr,
+ u64 size,
+ unsigned int flags,
+ int index)
+{
+ u32 lah, lal, pciah, pcial, sa;
+
+ if (!is_power_of_2(size) ||
+ (index < 2 && size < 0x100000) ||
+ (index == 2 && size < 0x100) ||
+ (plb_addr & (size - 1)) != 0) {
+ printk(KERN_WARNING "%s: Resource out of range\n",
+ hose->dn->full_name);
+ return -1;
+ }
+
+ /* Calculate register values */
+ lah = RES_TO_U32_HIGH(plb_addr);
+ lal = RES_TO_U32_LOW(plb_addr);
+ pciah = RES_TO_U32_HIGH(pci_addr);
+ pcial = RES_TO_U32_LOW(pci_addr);
+ sa = (0xffffffffu << ilog2(size)) | 0x1;
+
+ /* Program register values */
+ switch (index) {
+ case 0:
+ out_le32(mbase + PECFG_POM0LAH, pciah);
+ out_le32(mbase + PECFG_POM0LAL, pcial);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
+ /* Note that 3 here means enabled | single region */
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, sa | 3);
+ break;
+ case 1:
+ out_le32(mbase + PECFG_POM1LAH, pciah);
+ out_le32(mbase + PECFG_POM1LAL, pcial);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
+ /* Note that 3 here means enabled | single region */
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, sa | 3);
+ break;
+ case 2:
+ out_le32(mbase + PECFG_POM2LAH, pciah);
+ out_le32(mbase + PECFG_POM2LAL, pcial);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
+ /* Note that 3 here means enabled | IO space !!! */
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, sa | 3);
+ break;
+ }
+
+ return 0;
+}
+
static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
struct pci_controller *hose,
void __iomem *mbase)
{
- u32 lah, lal, pciah, pcial, sa;
- int i, j;
+ int i, j, found_isa_hole = 0;
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
@@ -1337,53 +1462,38 @@ static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
break;
}
- /* Calculate register values */
- lah = RES_TO_U32_HIGH(res->start);
- lal = RES_TO_U32_LOW(res->start);
- pciah = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset);
- pcial = RES_TO_U32_LOW(res->start - hose->pci_mem_offset);
- sa = res->end + 1 - res->start;
- if (!is_power_of_2(sa) || sa < 0x100000 ||
- sa > 0xffffffffu) {
- printk(KERN_WARNING "%s: Resource out of range\n",
- port->node->full_name);
- continue;
- }
- sa = (0xffffffffu << ilog2(sa)) | 0x1;
-
- /* Program register values */
- switch (j) {
- case 0:
- out_le32(mbase + PECFG_POM0LAH, pciah);
- out_le32(mbase + PECFG_POM0LAL, pcial);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, sa | 3);
- break;
- case 1:
- out_le32(mbase + PECFG_POM1LAH, pciah);
- out_le32(mbase + PECFG_POM1LAL, pcial);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, sa | 3);
- break;
+ /* Configure the resource */
+ if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
+ res->start,
+ res->start - hose->pci_mem_offset,
+ res->end + 1 - res->start,
+ res->flags,
+ j) == 0) {
+ j++;
+
+ /* If the resource PCI address is 0 then we have our
+ * ISA memory hole
+ */
+ if (res->start == hose->pci_mem_offset)
+ found_isa_hole = 1;
}
- j++;
}
- /* Configure IO, always 64K starting at 0 */
- if (hose->io_resource.flags & IORESOURCE_IO) {
- lah = RES_TO_U32_HIGH(hose->io_base_phys);
- lal = RES_TO_U32_LOW(hose->io_base_phys);
- out_le32(mbase + PECFG_POM2LAH, 0);
- out_le32(mbase + PECFG_POM2LAL, 0);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0xffff0000 | 3);
- }
+ /* Handle ISA memory hole if not already covered */
+ if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
+ if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
+ hose->isa_mem_phys, 0,
+ hose->isa_mem_size, 0, j) == 0)
+ printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
+ hose->dn->full_name);
+
+ /* Configure IO, always 64K starting at 0. We hard wire it to 64K !
+ * Note also that it -has- to be region index 2 on this HW
+ */
+ if (hose->io_resource.flags & IORESOURCE_IO)
+ ppc4xx_setup_one_pciex_POM(port, hose, mbase,
+ hose->io_base_phys, 0,
+ 0x10000, IORESOURCE_IO, 2);
}
static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
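All three window-setup helpers added above encode the window size the same way: the mask register value is (0xffffffffu << ilog2(size)) | 1, optionally OR'ed with a prefetch or enable bit. A worked example, purely illustrative, for a 256 MB outbound window:

	/* size        = 0x10000000 (256 MB, a power of two as required)
	 * ilog2(size) = 28
	 * 0xffffffffu << 28       = 0xf0000000
	 * ma = 0xf0000000 | 0x1   = 0xf0000001   (window enabled)
	 * with IORESOURCE_PREFETCH: ma |= 0x2 -> 0xf0000003
	 */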
diff --git a/arch/powerpc/sysdev/qe_lib/Kconfig b/arch/powerpc/sysdev/qe_lib/Kconfig
index 76ffbc48d4b9..41ac3dfac98e 100644
--- a/arch/powerpc/sysdev/qe_lib/Kconfig
+++ b/arch/powerpc/sysdev/qe_lib/Kconfig
@@ -22,5 +22,6 @@ config UCC
config QE_USB
bool
+ default y if USB_GADGET_FSL_QE
help
- QE USB Host Controller support
+ QE USB Controller support
diff --git a/arch/powerpc/sysdev/qe_lib/gpio.c b/arch/powerpc/sysdev/qe_lib/gpio.c
index 8e5a0bc36d0b..3485288dce31 100644
--- a/arch/powerpc/sysdev/qe_lib/gpio.c
+++ b/arch/powerpc/sysdev/qe_lib/gpio.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
@@ -24,8 +25,14 @@ struct qe_gpio_chip {
struct of_mm_gpio_chip mm_gc;
spinlock_t lock;
+ unsigned long pin_flags[QE_PIO_PINS];
+#define QE_PIN_REQUESTED 0
+
/* shadowed data register to clear/set bits safely */
u32 cpdata;
+
+ /* saved_regs used to restore dedicated functions */
+ struct qe_pio_regs saved_regs;
};
static inline struct qe_gpio_chip *
@@ -40,6 +47,12 @@ static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
struct qe_pio_regs __iomem *regs = mm_gc->regs;
qe_gc->cpdata = in_be32(&regs->cpdata);
+ qe_gc->saved_regs.cpdata = qe_gc->cpdata;
+ qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1);
+ qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2);
+ qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1);
+ qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2);
+ qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr);
}
static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
@@ -103,6 +116,188 @@ static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
return 0;
}
+struct qe_pin {
+ /*
+ * The qe_gpio_chip name is unfortunate, we should change that to
+ * something like qe_pio_controller. Someday.
+ */
+ struct qe_gpio_chip *controller;
+ int num;
+};
+
+/**
+ * qe_pin_request - Request a QE pin
+ * @np: device node to get a pin from
+ * @index: index of a pin in the device tree
+ * Context: non-atomic
+ *
+ * This function returns a qe_pin handle so that you can use it with the
+ * rest of the QE Pin Multiplexing API.
+ */
+struct qe_pin *qe_pin_request(struct device_node *np, int index)
+{
+ struct qe_pin *qe_pin;
+ struct device_node *gc;
+ struct of_gpio_chip *of_gc = NULL;
+ struct of_mm_gpio_chip *mm_gc;
+ struct qe_gpio_chip *qe_gc;
+ int err;
+ int size;
+ const void *gpio_spec;
+ const u32 *gpio_cells;
+ unsigned long flags;
+
+ qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL);
+ if (!qe_pin) {
+ pr_debug("%s: can't allocate memory\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ err = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index,
+ &gc, &gpio_spec);
+ if (err) {
+ pr_debug("%s: can't parse gpios property\n", __func__);
+ goto err0;
+ }
+
+ if (!of_device_is_compatible(gc, "fsl,mpc8323-qe-pario-bank")) {
+ pr_debug("%s: tried to get a non-qe pin\n", __func__);
+ err = -EINVAL;
+ goto err1;
+ }
+
+ of_gc = gc->data;
+ if (!of_gc) {
+ pr_debug("%s: gpio controller %s isn't registered\n",
+ np->full_name, gc->full_name);
+ err = -ENODEV;
+ goto err1;
+ }
+
+ gpio_cells = of_get_property(gc, "#gpio-cells", &size);
+ if (!gpio_cells || size != sizeof(*gpio_cells) ||
+ *gpio_cells != of_gc->gpio_cells) {
+ pr_debug("%s: wrong #gpio-cells for %s\n",
+ np->full_name, gc->full_name);
+ err = -EINVAL;
+ goto err1;
+ }
+
+ err = of_gc->xlate(of_gc, np, gpio_spec, NULL);
+ if (err < 0)
+ goto err1;
+
+ mm_gc = to_of_mm_gpio_chip(&of_gc->gc);
+ qe_gc = to_qe_gpio_chip(mm_gc);
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) {
+ qe_pin->controller = qe_gc;
+ qe_pin->num = err;
+ err = 0;
+ } else {
+ err = -EBUSY;
+ }
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ if (!err)
+ return qe_pin;
+err1:
+ of_node_put(gc);
+err0:
+ kfree(qe_pin);
+ pr_debug("%s failed with status %d\n", __func__, err);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(qe_pin_request);
+
+/**
+ * qe_pin_free - Free a pin
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function frees the qe_pin structure and makes a pin available
+ * for further qe_pin_request() calls.
+ */
+void qe_pin_free(struct qe_pin *qe_pin)
+{
+ struct qe_gpio_chip *qe_gc = qe_pin->controller;
+ unsigned long flags;
+ const int pin = qe_pin->num;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+ test_and_clear_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[pin]);
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ kfree(qe_pin);
+}
+EXPORT_SYMBOL(qe_pin_free);
+
+/**
+ * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function resets a pin to a dedicated peripheral function that
+ * has been set up by the firmware.
+ */
+void qe_pin_set_dedicated(struct qe_pin *qe_pin)
+{
+ struct qe_gpio_chip *qe_gc = qe_pin->controller;
+ struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ struct qe_pio_regs *sregs = &qe_gc->saved_regs;
+ int pin = qe_pin->num;
+ u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
+ u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2);
+ bool second_reg = pin > (QE_PIO_PINS / 2) - 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ if (second_reg) {
+ clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2);
+ clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2);
+ } else {
+ clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2);
+ clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2);
+ }
+
+ if (sregs->cpdata & mask1)
+ qe_gc->cpdata |= mask1;
+ else
+ qe_gc->cpdata &= ~mask1;
+
+ out_be32(&regs->cpdata, qe_gc->cpdata);
+ clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_dedicated);
+
+/**
+ * qe_pin_set_gpio - Set a pin to the GPIO mode
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function sets a pin to the GPIO mode.
+ */
+void qe_pin_set_gpio(struct qe_pin *qe_pin)
+{
+ struct qe_gpio_chip *qe_gc = qe_pin->controller;
+ struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ /* Let's make it input by default, GPIO API is able to change that. */
+ __par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_gpio);
+
static int __init qe_add_gpiochips(void)
{
struct device_node *np;
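A hedged sketch of how a driver might use the new pin-multiplexing API, assuming its device node carries a "gpios" property; the index 0, the np variable and the error handling are illustrative:

	struct qe_pin *pin;

	pin = qe_pin_request(np, 0);
	if (IS_ERR(pin))
		return PTR_ERR(pin);

	qe_pin_set_gpio(pin);		/* drive the pin through the GPIO API */
	/* ... */
	qe_pin_set_dedicated(pin);	/* hand it back to the QE peripheral */
	qe_pin_free(pin);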
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index b3b73ae57d6d..01bce3784b0a 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
+#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/bootmem.h>
@@ -38,6 +39,8 @@ static void qe_snums_init(void);
static int qe_sdma_init(void);
static DEFINE_SPINLOCK(qe_lock);
+DEFINE_SPINLOCK(cmxgcr_lock);
+EXPORT_SYMBOL(cmxgcr_lock);
/* QE snum state */
enum qe_snum_state {
diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c
index 1d78071aad7d..ebb442ea1917 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc.c
@@ -18,6 +18,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
+#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/irq.h>
@@ -26,9 +27,6 @@
#include <asm/qe.h>
#include <asm/ucc.h>
-DEFINE_SPINLOCK(cmxgcr_lock);
-EXPORT_SYMBOL(cmxgcr_lock);
-
int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
{
unsigned long flags;
diff --git a/arch/powerpc/sysdev/simple_gpio.c b/arch/powerpc/sysdev/simple_gpio.c
new file mode 100644
index 000000000000..43c4569e24b7
--- /dev/null
+++ b/arch/powerpc/sysdev/simple_gpio.c
@@ -0,0 +1,155 @@
+/*
+ * Simple Memory-Mapped GPIOs
+ *
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <asm/prom.h>
+#include "simple_gpio.h"
+
+struct u8_gpio_chip {
+ struct of_mm_gpio_chip mm_gc;
+ spinlock_t lock;
+
+ /* shadowed data register to clear/set bits safely */
+ u8 data;
+};
+
+static struct u8_gpio_chip *to_u8_gpio_chip(struct of_mm_gpio_chip *mm_gc)
+{
+ return container_of(mm_gc, struct u8_gpio_chip, mm_gc);
+}
+
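+/* GPIO 0 maps to the most-significant bit of the 8-bit register. */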
+static u8 u8_pin2mask(unsigned int pin)
+{
+ return 1 << (8 - 1 - pin);
+}
+
+static int u8_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+
+ return in_8(mm_gc->regs) & u8_pin2mask(gpio);
+}
+
+static void u8_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct u8_gpio_chip *u8_gc = to_u8_gpio_chip(mm_gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&u8_gc->lock, flags);
+
+ if (val)
+ u8_gc->data |= u8_pin2mask(gpio);
+ else
+ u8_gc->data &= ~u8_pin2mask(gpio);
+
+ out_8(mm_gc->regs, u8_gc->data);
+
+ spin_unlock_irqrestore(&u8_gc->lock, flags);
+}
+
+static int u8_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ return 0;
+}
+
+static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ u8_gpio_set(gc, gpio, val);
+ return 0;
+}
+
+static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+ struct u8_gpio_chip *u8_gc = to_u8_gpio_chip(mm_gc);
+
+ u8_gc->data = in_8(mm_gc->regs);
+}
+
+static int __init u8_simple_gpiochip_add(struct device_node *np)
+{
+ int ret;
+ struct u8_gpio_chip *u8_gc;
+ struct of_mm_gpio_chip *mm_gc;
+ struct of_gpio_chip *of_gc;
+ struct gpio_chip *gc;
+
+ u8_gc = kzalloc(sizeof(*u8_gc), GFP_KERNEL);
+ if (!u8_gc)
+ return -ENOMEM;
+
+ spin_lock_init(&u8_gc->lock);
+
+ mm_gc = &u8_gc->mm_gc;
+ of_gc = &mm_gc->of_gc;
+ gc = &of_gc->gc;
+
+ mm_gc->save_regs = u8_gpio_save_regs;
+ of_gc->gpio_cells = 2;
+ gc->ngpio = 8;
+ gc->direction_input = u8_gpio_dir_in;
+ gc->direction_output = u8_gpio_dir_out;
+ gc->get = u8_gpio_get;
+ gc->set = u8_gpio_set;
+
+ ret = of_mm_gpiochip_add(np, mm_gc);
+ if (ret)
+ goto err;
+ return 0;
+err:
+ kfree(u8_gc);
+ return ret;
+}
+
+void __init simple_gpiochip_init(const char *compatible)
+{
+ struct device_node *np;
+
+ for_each_compatible_node(np, NULL, compatible) {
+ int ret;
+ struct resource r;
+
+ ret = of_address_to_resource(np, 0, &r);
+ if (ret)
+ goto err;
+
+ switch (resource_size(&r)) {
+ case 1:
+ ret = u8_simple_gpiochip_add(np);
+ if (ret)
+ goto err;
+ break;
+ default:
+ /*
+ * Whenever support for a GPIO bank width > 1 is needed,
+ * just turn the u8_ code into macros and construct the
+ * required uX_ variants from them.
+ */
+ ret = -ENOSYS;
+ goto err;
+ }
+ continue;
+err:
+ pr_err("%s: registration failed, status %d\n",
+ np->full_name, ret);
+ }
+}
diff --git a/arch/powerpc/sysdev/simple_gpio.h b/arch/powerpc/sysdev/simple_gpio.h
new file mode 100644
index 000000000000..3a7b0c513c76
--- /dev/null
+++ b/arch/powerpc/sysdev/simple_gpio.h
@@ -0,0 +1,12 @@
+#ifndef __SYSDEV_SIMPLE_GPIO_H
+#define __SYSDEV_SIMPLE_GPIO_H
+
+#include <linux/errno.h>
+
+#ifdef CONFIG_SIMPLE_GPIO
+extern void simple_gpiochip_init(const char *compatible);
+#else
+static inline void simple_gpiochip_init(const char *compatible) {}
+#endif /* CONFIG_SIMPLE_GPIO */
+
+#endif /* __SYSDEV_SIMPLE_GPIO_H */
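A usage sketch for platform code (illustrative only; the compatible string, function name and register address below are hypothetical and not part of this patch). A board setup file registers its 1-byte-wide banks once during machine init, against a device-tree node such as gpio@f8000020 { compatible = "example,board-gpio"; reg = <0xf8000020 1>; gpio-controller; #gpio-cells = <2>; }:

	#include "simple_gpio.h"

	static void __init example_board_setup_arch(void)
	{
		/* Registers one 8-bit GPIO bank for every matching node. */
		simple_gpiochip_init("example,board-gpio");
	}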
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 51d97588e762..9cb03b71b9d6 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -4,7 +4,7 @@ ifdef CONFIG_PPC64
EXTRA_CFLAGS += -mno-minimal-toc
endif
-obj-y += xmon.o setjmp.o start.o nonstdio.o
+obj-y += xmon.o start.o nonstdio.o
ifdef CONFIG_XMON_DISASSEMBLY
obj-y += ppc-dis.o ppc-opc.o
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 076368c8b8a9..8dfad7d9a004 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -41,6 +41,7 @@
#include <asm/spu_priv1.h>
#include <asm/firmware.h>
#include <asm/setjmp.h>
+#include <asm/reg.h>
#ifdef CONFIG_PPC64
#include <asm/hvcall.h>
@@ -159,8 +160,6 @@ static int xmon_no_auto_backtrace;
extern void xmon_enter(void);
extern void xmon_leave(void);
-extern void xmon_save_regs(struct pt_regs *);
-
#ifdef CONFIG_PPC64
#define REG "%.16lx"
#define REGS_PER_LINE 4
@@ -532,7 +531,7 @@ int xmon(struct pt_regs *excp)
struct pt_regs regs;
if (excp == NULL) {
- xmon_save_regs(&regs);
+ ppc_save_regs(&regs);
excp = &regs;
}