Diffstat:
-rw-r--r--  arch/powerpc/Kconfig | 64
-rw-r--r--  arch/powerpc/Kconfig.debug | 9
-rw-r--r--  arch/powerpc/Makefile | 1
-rw-r--r--  arch/powerpc/boot/Makefile | 1
-rw-r--r--  arch/powerpc/boot/devtree.c | 2
-rw-r--r--  arch/powerpc/boot/dts/bamboo.dts | 3
-rw-r--r--  arch/powerpc/boot/dts/canyonlands.dts | 14
-rw-r--r--  arch/powerpc/boot/dts/gef_sbc610.dts | 11
-rw-r--r--  arch/powerpc/boot/dts/kuroboxHD.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/kuroboxHG.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/lite5200.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/lite5200b.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/motionpro.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/mpc8315erdb.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/mpc8349emitx.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/mpc8349emitxgp.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/mpc8377_rdb.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/mpc8378_rdb.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/mpc8379_rdb.dts | 1
-rw-r--r--  arch/powerpc/boot/dts/mpc8572ds.dts | 113
-rw-r--r--  arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts | 483
-rw-r--r--  arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts | 234
-rw-r--r--  arch/powerpc/boot/dts/pcm030.dts | 2
-rw-r--r--  arch/powerpc/boot/dts/tqm5200.dts | 1
-rw-r--r--  arch/powerpc/boot/libfdt-wrapper.c | 2
-rw-r--r--  arch/powerpc/configs/86xx/gef_sbc610_defconfig | 8
-rw-r--r--  arch/powerpc/configs/ppc44x_defconfig | 12
-rw-r--r--  arch/powerpc/include/asm/atomic.h | 18
-rw-r--r--  arch/powerpc/include/asm/bug.h | 11
-rw-r--r--  arch/powerpc/include/asm/byteorder.h | 38
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 113
-rw-r--r--  arch/powerpc/include/asm/dcr-native.h | 63
-rw-r--r--  arch/powerpc/include/asm/dcr.h | 4
-rw-r--r--  arch/powerpc/include/asm/device.h | 12
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h | 156
-rw-r--r--  arch/powerpc/include/asm/eeh.h | 8
-rw-r--r--  arch/powerpc/include/asm/feature-fixups.h | 30
-rw-r--r--  arch/powerpc/include/asm/highmem.h | 23
-rw-r--r--  arch/powerpc/include/asm/io.h | 7
-rw-r--r--  arch/powerpc/include/asm/kdump.h | 13
-rw-r--r--  arch/powerpc/include/asm/kexec.h | 15
-rw-r--r--  arch/powerpc/include/asm/local.h | 4
-rw-r--r--  arch/powerpc/include/asm/lppaca.h | 3
-rw-r--r--  arch/powerpc/include/asm/mmu-40x.h | 5
-rw-r--r--  arch/powerpc/include/asm/mmu-44x.h | 22
-rw-r--r--  arch/powerpc/include/asm/mmu-8xx.h | 3
-rw-r--r--  arch/powerpc/include/asm/mmu-fsl-booke.h | 7
-rw-r--r--  arch/powerpc/include/asm/mmu.h | 57
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h | 257
-rw-r--r--  arch/powerpc/include/asm/mpc52xx.h | 19
-rw-r--r--  arch/powerpc/include/asm/mpc52xx_psc.h | 11
-rw-r--r--  arch/powerpc/include/asm/mutex.h | 135
-rw-r--r--  arch/powerpc/include/asm/page.h | 13
-rw-r--r--  arch/powerpc/include/asm/page_32.h | 7
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 30
-rw-r--r--  arch/powerpc/include/asm/pci.h | 15
-rw-r--r--  arch/powerpc/include/asm/pgalloc-32.h | 11
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h | 34
-rw-r--r--  arch/powerpc/include/asm/pgalloc.h | 41
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc32.h | 42
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 15
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 26
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 4
-rw-r--r--  arch/powerpc/include/asm/processor.h | 8
-rw-r--r--  arch/powerpc/include/asm/prom.h | 3
-rw-r--r--  arch/powerpc/include/asm/ps3.h | 56
-rw-r--r--  arch/powerpc/include/asm/ps3av.h | 4
-rw-r--r--  arch/powerpc/include/asm/reg.h | 4
-rw-r--r--  arch/powerpc/include/asm/rtas.h | 1
-rw-r--r--  arch/powerpc/include/asm/sfp-machine.h | 58
-rw-r--r--  arch/powerpc/include/asm/smp.h | 7
-rw-r--r--  arch/powerpc/include/asm/spinlock.h | 2
-rw-r--r--  arch/powerpc/include/asm/synch.h | 4
-rw-r--r--  arch/powerpc/include/asm/system.h | 24
-rw-r--r--  arch/powerpc/include/asm/time.h | 20
-rw-r--r--  arch/powerpc/include/asm/tlbflush.h | 87
-rw-r--r--  arch/powerpc/include/asm/vdso_datapage.h | 3
-rw-r--r--  arch/powerpc/kernel/Makefile | 4
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 6
-rw-r--r--  arch/powerpc/kernel/cputable.c | 117
-rw-r--r--  arch/powerpc/kernel/dma.c | 26
-rw-r--r--  arch/powerpc/kernel/head_32.S | 31
-rw-r--r--  arch/powerpc/kernel/head_44x.S | 34
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S | 107
-rw-r--r--  arch/powerpc/kernel/ibmebus.c | 3
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c | 91
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c | 78
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 238
-rw-r--r--  arch/powerpc/kernel/module.c | 6
-rw-r--r--  arch/powerpc/kernel/of_device.c | 18
-rw-r--r--  arch/powerpc/kernel/paca.c | 1
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 300
-rw-r--r--  arch/powerpc/kernel/pci_32.c | 108
-rw-r--r--  arch/powerpc/kernel/pci_64.c | 134
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 9
-rw-r--r--  arch/powerpc/kernel/ppc_save_regs.S (renamed from arch/powerpc/xmon/setjmp.S) | 2
-rw-r--r--  arch/powerpc/kernel/process.c | 4
-rw-r--r--  arch/powerpc/kernel/prom.c | 47
-rw-r--r--  arch/powerpc/kernel/prom_parse.c | 5
-rw-r--r--  arch/powerpc/kernel/rtas.c | 26
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c | 48
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 15
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 5
-rw-r--r--  arch/powerpc/kernel/smp-tbsync.c | 12
-rw-r--r--  arch/powerpc/kernel/smp.c | 71
-rw-r--r--  arch/powerpc/kernel/swsusp.c | 2
-rw-r--r--  arch/powerpc/kernel/swsusp_32.S | 6
-rw-r--r--  arch/powerpc/kernel/sysfs.c | 7
-rw-r--r--  arch/powerpc/kernel/time.c | 36
-rw-r--r--  arch/powerpc/kernel/traps.c | 62
-rw-r--r--  arch/powerpc/kernel/vdso.c | 10
-rw-r--r--  arch/powerpc/kernel/vdso32/gettimeofday.S | 208
-rw-r--r--  arch/powerpc/kernel/vdso32/vdso32.lds.S | 3
-rw-r--r--  arch/powerpc/kernel/vdso64/gettimeofday.S | 141
-rw-r--r--  arch/powerpc/kernel/vdso64/vdso64.lds.S | 3
-rw-r--r--  arch/powerpc/kernel/vio.c | 12
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 6
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 3
-rw-r--r--  arch/powerpc/lib/copyuser_64.S | 17
-rw-r--r--  arch/powerpc/lib/dma-noncoherent.c | 25
-rw-r--r--  arch/powerpc/lib/memcpy_64.S | 16
-rw-r--r--  arch/powerpc/math-emu/Makefile | 2
-rw-r--r--  arch/powerpc/math-emu/fadd.c | 1
-rw-r--r--  arch/powerpc/math-emu/fcmpo.c | 5
-rw-r--r--  arch/powerpc/math-emu/fdiv.c | 9
-rw-r--r--  arch/powerpc/math-emu/fdivs.c | 9
-rw-r--r--  arch/powerpc/math-emu/fmadd.c | 5
-rw-r--r--  arch/powerpc/math-emu/fmadds.c | 5
-rw-r--r--  arch/powerpc/math-emu/fmsub.c | 5
-rw-r--r--  arch/powerpc/math-emu/fmsubs.c | 5
-rw-r--r--  arch/powerpc/math-emu/fmul.c | 3
-rw-r--r--  arch/powerpc/math-emu/fmuls.c | 3
-rw-r--r--  arch/powerpc/math-emu/fnmadd.c | 5
-rw-r--r--  arch/powerpc/math-emu/fnmadds.c | 5
-rw-r--r--  arch/powerpc/math-emu/fnmsub.c | 5
-rw-r--r--  arch/powerpc/math-emu/fnmsubs.c | 5
-rw-r--r--  arch/powerpc/math-emu/fsqrt.c | 5
-rw-r--r--  arch/powerpc/math-emu/fsqrts.c | 5
-rw-r--r--  arch/powerpc/math-emu/fsub.c | 3
-rw-r--r--  arch/powerpc/math-emu/fsubs.c | 3
-rw-r--r--  arch/powerpc/math-emu/math_efp.c | 720
-rw-r--r--  arch/powerpc/mm/Makefile | 10
-rw-r--r--  arch/powerpc/mm/fault.c | 14
-rw-r--r--  arch/powerpc/mm/hash_low_32.S | 111
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 22
-rw-r--r--  arch/powerpc/mm/init_32.c | 6
-rw-r--r--  arch/powerpc/mm/mem.c | 6
-rw-r--r--  arch/powerpc/mm/mmu_context_32.c | 84
-rw-r--r--  arch/powerpc/mm/mmu_context_hash32.c | 103
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c (renamed from arch/powerpc/mm/mmu_context_64.c) | 8
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c | 397
-rw-r--r--  arch/powerpc/mm/mmu_decl.h | 65
-rw-r--r--  arch/powerpc/mm/pgtable.c | 117
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 56
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c | 10
-rw-r--r--  arch/powerpc/mm/tlb_hash32.c (renamed from arch/powerpc/mm/tlb_32.c) | 4
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c (renamed from arch/powerpc/mm/tlb_64.c) | 86
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c | 209
-rw-r--r--  arch/powerpc/mm/tlb_nohash_low.S | 166
-rw-r--r--  arch/powerpc/platforms/40x/ep405.c | 2
-rw-r--r--  arch/powerpc/platforms/40x/kilauea.c | 2
-rw-r--r--  arch/powerpc/platforms/40x/ppc40x_simple.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/ebony.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/ppc44x_simple.c | 2
-rw-r--r--  arch/powerpc/platforms/44x/sam440ep.c | 2
-rw-r--r--  arch/powerpc/platforms/52xx/lite5200_pm.c | 1
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pci.c | 2
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pic.c | 237
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pic.h | 53
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pm.c | 3
-rw-r--r--  arch/powerpc/platforms/82xx/pq2.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ds.c | 11
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c | 8
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 104
-rw-r--r--  arch/powerpc/platforms/86xx/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/86xx/Makefile | 3
-rw-r--r--  arch/powerpc/platforms/86xx/gef_gpio.c | 143
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 12
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig | 23
-rw-r--r--  arch/powerpc/platforms/cell/Makefile | 17
-rw-r--r--  arch/powerpc/platforms/cell/celleb_setup.c | 9
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 5
-rw-r--r--  arch/powerpc/platforms/cell/qpace_setup.c | 152
-rw-r--r--  arch/powerpc/platforms/cell/setup.c | 6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c | 27
-rw-r--r--  arch/powerpc/platforms/chrp/pci.c | 3
-rw-r--r--  arch/powerpc/platforms/embedded6xx/c2k.c | 6
-rw-r--r--  arch/powerpc/platforms/embedded6xx/prpmc2800.c | 6
-rw-r--r--  arch/powerpc/platforms/iseries/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/maple/setup.c | 6
-rw-r--r--  arch/powerpc/platforms/powermac/cpufreq_32.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/pci.c | 6
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c | 10
-rw-r--r--  arch/powerpc/platforms/powermac/sleep.S | 5
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 2
-rw-r--r--  arch/powerpc/platforms/ps3/device-init.c | 33
-rw-r--r--  arch/powerpc/platforms/ps3/mm.c | 2
-rw-r--r--  arch/powerpc/platforms/ps3/setup.c | 8
-rw-r--r--  arch/powerpc/platforms/ps3/system-bus.c | 38
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/pseries/cmm.c | 29
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 44
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/pci_dlpar.c | 163
-rw-r--r--  arch/powerpc/platforms/pseries/phyp_dump.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c | 43
-rw-r--r--  arch/powerpc/sysdev/bestcomm/ata.c | 3
-rw-r--r--  arch/powerpc/sysdev/bestcomm/ata.h | 19
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm.c | 7
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm.h | 61
-rw-r--r--  arch/powerpc/sysdev/bestcomm/bestcomm_priv.h | 20
-rw-r--r--  arch/powerpc/sysdev/dcr-low.S | 8
-rw-r--r--  arch/powerpc/sysdev/dcr.c | 5
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 4
-rw-r--r--  arch/powerpc/sysdev/grackle.c | 2
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 32
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.c | 306
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe.c | 3
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc.c | 4
-rw-r--r--  arch/powerpc/xmon/Makefile | 2
-rw-r--r--  arch/powerpc/xmon/xmon.c | 5
-rw-r--r--  arch/sparc/include/asm/device.h | 12
-rw-r--r--  drivers/ata/Kconfig | 3
-rw-r--r--  drivers/ata/pata_mpc52xx.c | 546
-rw-r--r--  drivers/char/Kconfig | 6
-rw-r--r--  drivers/char/Makefile | 1
-rw-r--r--  drivers/char/bsr.c | 84
-rw-r--r--  drivers/char/hvc_console.c | 13
-rw-r--r--  drivers/char/hvc_console.h | 2
-rw-r--r--  drivers/char/hvc_iseries.c | 4
-rw-r--r--  drivers/char/hvc_udbg.c | 96
-rw-r--r--  drivers/char/hvc_vio.c | 4
-rw-r--r--  drivers/char/hvcs.c | 2
-rw-r--r--  drivers/char/hvsi.c | 2
-rw-r--r--  drivers/edac/Kconfig | 2
-rw-r--r--  drivers/macintosh/via-pmu.c | 4
-rw-r--r--  drivers/macintosh/windfarm_smu_sat.c | 5
-rw-r--r--  drivers/net/fec_mpc52xx_phy.c | 5
-rw-r--r--  drivers/of/base.c | 68
-rw-r--r--  drivers/of/gpio.c | 70
-rw-r--r--  drivers/of/of_i2c.c | 11
-rw-r--r--  drivers/pci/hotplug/rpadlpar_core.c | 69
-rw-r--r--  drivers/ps3/ps3av.c | 20
-rw-r--r--  drivers/ps3/ps3av_cmd.c | 4
-rw-r--r--  drivers/rapidio/rio-scan.c | 8
-rw-r--r--  drivers/serial/mpc52xx_uart.c | 74
-rw-r--r--  drivers/serial/pmac_zilog.c | 27
-rw-r--r--  drivers/video/controlfb.c | 4
-rw-r--r--  drivers/video/ps3fb.c | 23
-rw-r--r--  fs/proc/proc_devtree.c | 3
-rw-r--r--  include/linux/of.h | 6
-rw-r--r--  include/linux/of_gpio.h | 44
-rw-r--r--  include/linux/rio_drv.h | 4
254 files changed, 7138 insertions(+), 2728 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index adb23ea1c1ef..79f25cef32df 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -285,6 +285,10 @@ config IOMMU_VMERGE
config IOMMU_HELPER
def_bool PPC64
+config PPC_NEED_DMA_SYNC_OPS
+ def_bool y
+ depends on NOT_COHERENT_CACHE
+
config HOTPLUG_CPU
bool "Support for enabling/disabling CPUs"
depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
@@ -322,7 +326,7 @@ config KEXEC
config CRASH_DUMP
bool "Build a kdump crash kernel"
- depends on PPC_MULTIPLATFORM && PPC64 && RELOCATABLE
+ depends on (PPC64 && RELOCATABLE) || 6xx
help
Build a kernel suitable for use as a kdump capture kernel.
The same kernel binary can be used as production kernel and dump
@@ -401,23 +405,53 @@ config PPC_HAS_HASH_64K
depends on PPC64
default n
-config PPC_64K_PAGES
- bool "64k page size"
- depends on PPC64
- select PPC_HAS_HASH_64K
+choice
+ prompt "Page size"
+ default PPC_4K_PAGES
help
- This option changes the kernel logical page size to 64k. On machines
- without processor support for 64k pages, the kernel will simulate
- them by loading each individual 4k page on demand transparently,
- while on hardware with such support, it will be used to map
- normal application pages.
+ Select the kernel logical page size. Increasing the page size
+ will reduce software overhead at each page boundary, allow
+ hardware prefetch mechanisms to be more effective, and allow
+ larger DMA transfers, increasing IO efficiency and reducing
+ overhead. However, the utilization of memory will increase.
+ For example, each cached file will use a multiple of the
+ page size to hold its contents, and the difference between the
+ end of file and the end of page is wasted.
+
+ Some dedicated systems, such as software RAID serving with
+ accelerated calculations, have shown significant performance gains.
+
+ If you configure a 64 bit kernel for 64k pages but the
+ processor does not support them, then the kernel will simulate
+ them with 4k pages, loading them on demand, but with the
+ reduced software overhead and larger internal fragmentation.
+ For the 32 bit kernel, a large page option will not be offered
+ unless it is supported by the configured processor.
+
+ If unsure, choose 4K_PAGES.
+
+config PPC_4K_PAGES
+ bool "4k page size"
+
+config PPC_16K_PAGES
+ bool "16k page size" if 44x
+
+config PPC_64K_PAGES
+ bool "64k page size" if 44x || PPC_STD_MMU_64
+ select PPC_HAS_HASH_64K if PPC_STD_MMU_64
+
+endchoice
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
- range 9 64 if PPC_64K_PAGES
- default "9" if PPC_64K_PAGES
- range 13 64 if PPC64 && !PPC_64K_PAGES
- default "13" if PPC64 && !PPC_64K_PAGES
+ range 9 64 if PPC_STD_MMU_64 && PPC_64K_PAGES
+ default "9" if PPC_STD_MMU_64 && PPC_64K_PAGES
+ range 13 64 if PPC_STD_MMU_64 && !PPC_64K_PAGES
+ default "13" if PPC_STD_MMU_64 && !PPC_64K_PAGES
+ range 9 64 if PPC_STD_MMU_32 && PPC_16K_PAGES
+ default "9" if PPC_STD_MMU_32 && PPC_16K_PAGES
+ range 7 64 if PPC_STD_MMU_32 && PPC_64K_PAGES
+ default "7" if PPC_STD_MMU_32 && PPC_64K_PAGES
range 11 64
default "11"
help
@@ -437,7 +471,7 @@ config FORCE_MAX_ZONEORDER
config PPC_SUBPAGE_PROT
bool "Support setting protections for 4k subpages"
- depends on PPC_64K_PAGES
+ depends on PPC_STD_MMU_64 && PPC_64K_PAGES
help
This option adds support for a system call to allow user programs
to set access permissions (read/write, readonly, or no access)
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 15eb27861fc7..08f7cc0a1953 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -2,6 +2,15 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
+config PRINT_STACK_DEPTH
+ int "Stack depth to print" if DEBUG_KERNEL
+ default 64
+ help
+ This option allows you to set the stack depth that the kernel
+ prints in stack traces. This can be useful if your display is
+ too small and stack traces cause important information to
+ scroll off the screen.
+
config DEBUG_STACKOVERFLOW
bool "Check for stack overflows"
depends on DEBUG_KERNEL
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 1f0667069940..72d17f50e54f 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -107,7 +107,6 @@ KBUILD_CFLAGS += $(call cc-option,-mno-altivec)
# (We use all available options to help semi-broken compilers)
KBUILD_CFLAGS += $(call cc-option,-mno-spe)
KBUILD_CFLAGS += $(call cc-option,-mspe=no)
-KBUILD_CFLAGS += $(call cc-option,-mabi=no-spe)
# Enable unit-at-a-time mode when possible. It shrinks the
# kernel considerably.
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 3d3daa674299..f32829937aad 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -194,6 +194,7 @@ image-$(CONFIG_PPC_MAPLE) += zImage.pseries
image-$(CONFIG_PPC_IBM_CELL_BLADE) += zImage.pseries
image-$(CONFIG_PPC_PS3) += dtbImage.ps3
image-$(CONFIG_PPC_CELLEB) += zImage.pseries
+image-$(CONFIG_PPC_CELL_QPACE) += zImage.pseries
image-$(CONFIG_PPC_CHRP) += zImage.chrp
image-$(CONFIG_PPC_EFIKA) += zImage.chrp
image-$(CONFIG_PPC_PMAC) += zImage.pmac
diff --git a/arch/powerpc/boot/devtree.c b/arch/powerpc/boot/devtree.c
index 5d12336dc360..a7e21a35c03a 100644
--- a/arch/powerpc/boot/devtree.c
+++ b/arch/powerpc/boot/devtree.c
@@ -213,7 +213,7 @@ static int find_range(u32 *reg, u32 *ranges, int nregaddr,
u32 range_addr[MAX_ADDR_CELLS];
u32 range_size[MAX_ADDR_CELLS];
- copy_val(range_addr, ranges + i, naddr);
+ copy_val(range_addr, ranges + i, nregaddr);
copy_val(range_size, ranges + i + nregaddr + naddr, nsize);
if (compare_reg(reg, range_addr, range_size))
diff --git a/arch/powerpc/boot/dts/bamboo.dts b/arch/powerpc/boot/dts/bamboo.dts
index 6ce0cc2c0208..aa68911f6560 100644
--- a/arch/powerpc/boot/dts/bamboo.dts
+++ b/arch/powerpc/boot/dts/bamboo.dts
@@ -269,7 +269,8 @@
* later cannot be changed. Chip supports a second
* IO range but we don't use it for now
*/
- ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x20000000
+ ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x40000000
+ 0x02000000 0x00000000 0x00000000 0x00000000 0xe0000000 0x00000000 0x00100000
0x01000000 0x00000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>;
/* Inbound 2GB range starting at 0 */
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index 79fe412c11c9..8b5ba8261a36 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -40,6 +40,7 @@
d-cache-size = <32768>;
dcr-controller;
dcr-access-method = "native";
+ next-level-cache = <&L2C0>;
};
};
@@ -104,6 +105,16 @@
dcr-reg = <0x00c 0x002>;
};
+ L2C0: l2c {
+ compatible = "ibm,l2-cache-460ex", "ibm,l2-cache";
+ dcr-reg = <0x020 0x008 /* Internal SRAM DCR's */
+ 0x030 0x008>; /* L2 cache DCR's */
+ cache-line-size = <32>; /* 32 bytes */
+ cache-size = <262144>; /* L2, 256K */
+ interrupt-parent = <&UIC1>;
+ interrupts = <11 1>;
+ };
+
plb {
compatible = "ibm,plb-460ex", "ibm,plb4";
#address-cells = <2>;
@@ -343,6 +354,7 @@
* later cannot be changed
*/
ranges = <0x02000000 0x00000000 0x80000000 0x0000000d 0x80000000 0x00000000 0x80000000
+ 0x02000000 0x00000000 0x00000000 0x0000000c 0x0ee00000 0x00000000 0x00100000
0x01000000 0x00000000 0x00000000 0x0000000c 0x08000000 0x00000000 0x00010000>;
/* Inbound 2GB range starting at 0 */
@@ -373,6 +385,7 @@
* later cannot be changed
*/
ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x00000000 0x00000000 0x80000000
+ 0x02000000 0x00000000 0x00000000 0x0000000f 0x00000000 0x00000000 0x00100000
0x01000000 0x00000000 0x00000000 0x0000000f 0x80000000 0x00000000 0x00010000>;
/* Inbound 2GB range starting at 0 */
@@ -414,6 +427,7 @@
* later cannot be changed
*/
ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x80000000 0x00000000 0x80000000
+ 0x02000000 0x00000000 0x00000000 0x0000000f 0x00100000 0x00000000 0x00100000
0x01000000 0x00000000 0x00000000 0x0000000f 0x80010000 0x00000000 0x00010000>;
/* Inbound 2GB range starting at 0 */
diff --git a/arch/powerpc/boot/dts/gef_sbc610.dts b/arch/powerpc/boot/dts/gef_sbc610.dts
index e48cfa740c8a..9708b3423bbd 100644
--- a/arch/powerpc/boot/dts/gef_sbc610.dts
+++ b/arch/powerpc/boot/dts/gef_sbc610.dts
@@ -98,6 +98,12 @@
interrupt-parent = <&mpic>;
};
+ gef_gpio: gpio@7,14000 {
+ #gpio-cells = <2>;
+ compatible = "gef,sbc610-gpio";
+ reg = <0x7 0x14000 0x24>;
+ gpio-controller;
+ };
};
soc@fef00000 {
@@ -119,6 +125,11 @@
interrupt-parent = <&mpic>;
dfsrr;
+ rtc@51 {
+ compatible = "epson,rx8581";
+ reg = <0x00000051>;
+ };
+
eti@6b {
compatible = "dallas,ds1682";
reg = <0x6b>;
diff --git a/arch/powerpc/boot/dts/kuroboxHD.dts b/arch/powerpc/boot/dts/kuroboxHD.dts
index 2e5a1a1812b6..8d725d10882f 100644
--- a/arch/powerpc/boot/dts/kuroboxHD.dts
+++ b/arch/powerpc/boot/dts/kuroboxHD.dts
@@ -76,7 +76,6 @@ XXXX add flash parts, rtc, ??
interrupt-parent = <&mpic>;
rtc@32 {
- device_type = "rtc";
compatible = "ricoh,rs5c372a";
reg = <0x32>;
};
diff --git a/arch/powerpc/boot/dts/kuroboxHG.dts b/arch/powerpc/boot/dts/kuroboxHG.dts
index e4916e69ad31..b13a11eb81b0 100644
--- a/arch/powerpc/boot/dts/kuroboxHG.dts
+++ b/arch/powerpc/boot/dts/kuroboxHG.dts
@@ -76,7 +76,6 @@ XXXX add flash parts, rtc, ??
interrupt-parent = <&mpic>;
rtc@32 {
- device_type = "rtc";
compatible = "ricoh,rs5c372a";
reg = <0x32>;
};
diff --git a/arch/powerpc/boot/dts/lite5200.dts b/arch/powerpc/boot/dts/lite5200.dts
index 2cf9a8768f44..3f7a5dce8de0 100644
--- a/arch/powerpc/boot/dts/lite5200.dts
+++ b/arch/powerpc/boot/dts/lite5200.dts
@@ -130,7 +130,6 @@
rtc@800 { // Real time clock
compatible = "fsl,mpc5200-rtc";
- device_type = "rtc";
reg = <0x800 0x100>;
interrupts = <1 5 0 1 6 0>;
interrupt-parent = <&mpc5200_pic>;
diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts
index 7bd5b9c399b8..63e3bb48e843 100644
--- a/arch/powerpc/boot/dts/lite5200b.dts
+++ b/arch/powerpc/boot/dts/lite5200b.dts
@@ -130,7 +130,6 @@
rtc@800 { // Real time clock
compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
- device_type = "rtc";
reg = <0x800 0x100>;
interrupts = <1 5 0 1 6 0>;
interrupt-parent = <&mpc5200_pic>;
diff --git a/arch/powerpc/boot/dts/motionpro.dts b/arch/powerpc/boot/dts/motionpro.dts
index 9e3c921be164..52ba6f98b273 100644
--- a/arch/powerpc/boot/dts/motionpro.dts
+++ b/arch/powerpc/boot/dts/motionpro.dts
@@ -248,7 +248,6 @@
fsl5200-clocking;
rtc@68 {
- device_type = "rtc";
compatible = "dallas,ds1339";
reg = <0x68>;
};
diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts
index d2cdd47a246d..072c9b0f8c8e 100644
--- a/arch/powerpc/boot/dts/mpc8315erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8315erdb.dts
@@ -117,7 +117,6 @@
interrupt-parent = <&ipic>;
dfsrr;
rtc@68 {
- device_type = "rtc";
compatible = "dallas,ds1339";
reg = <0x68>;
};
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts
index 712783d0707e..b5eda94a8e2a 100644
--- a/arch/powerpc/boot/dts/mpc8349emitx.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitx.dts
@@ -85,7 +85,6 @@
dfsrr;
rtc@68 {
- device_type = "rtc";
compatible = "dallas,ds1339";
reg = <0x68>;
interrupts = <18 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8349emitxgp.dts b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
index 3e918af41fb1..c87a6015e165 100644
--- a/arch/powerpc/boot/dts/mpc8349emitxgp.dts
+++ b/arch/powerpc/boot/dts/mpc8349emitxgp.dts
@@ -83,7 +83,6 @@
dfsrr;
rtc@68 {
- device_type = "rtc";
compatible = "dallas,ds1339";
reg = <0x68>;
interrupts = <18 0x8>;
diff --git a/arch/powerpc/boot/dts/mpc8377_rdb.dts b/arch/powerpc/boot/dts/mpc8377_rdb.dts
index 31f348fdfe14..9413af3b9925 100644
--- a/arch/powerpc/boot/dts/mpc8377_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8377_rdb.dts
@@ -117,7 +117,6 @@
interrupt-parent = <&ipic>;
dfsrr;
rtc@68 {
- device_type = "rtc";
compatible = "dallas,ds1339";
reg = <0x68>;
};
diff --git a/arch/powerpc/boot/dts/mpc8378_rdb.dts b/arch/powerpc/boot/dts/mpc8378_rdb.dts
index 7a2bad038bd6..23c10ce22c2c 100644
--- a/arch/powerpc/boot/dts/mpc8378_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8378_rdb.dts
@@ -117,7 +117,6 @@
interrupt-parent = <&ipic>;
dfsrr;
rtc@68 {
- device_type = "rtc";
compatible = "dallas,ds1339";
reg = <0x68>;
};
diff --git a/arch/powerpc/boot/dts/mpc8379_rdb.dts b/arch/powerpc/boot/dts/mpc8379_rdb.dts
index e067616f3f42..72cdc3c4c7e3 100644
--- a/arch/powerpc/boot/dts/mpc8379_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8379_rdb.dts
@@ -117,7 +117,6 @@
interrupt-parent = <&ipic>;
dfsrr;
rtc@68 {
- device_type = "rtc";
compatible = "dallas,ds1339";
reg = <0x68>;
};
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dts b/arch/powerpc/boot/dts/mpc8572ds.dts
index 05f67253b49f..21459e161d02 100644
--- a/arch/powerpc/boot/dts/mpc8572ds.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds.dts
@@ -63,6 +63,119 @@
device_type = "memory";
};
+ localbus@ffe05000 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8572-elbc", "fsl,elbc", "simple-bus";
+ reg = <0 0xffe05000 0 0x1000>;
+ interrupts = <19 2>;
+ interrupt-parent = <&mpic>;
+
+ ranges = <0x0 0x0 0x0 0xe8000000 0x08000000
+ 0x1 0x0 0x0 0xe0000000 0x08000000
+ 0x2 0x0 0x0 0xffa00000 0x00040000
+ 0x3 0x0 0x0 0xffdf0000 0x00008000
+ 0x4 0x0 0x0 0xffa40000 0x00040000
+ 0x5 0x0 0x0 0xffa80000 0x00040000
+ 0x6 0x0 0x0 0xffac0000 0x00040000>;
+
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x8000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ ramdisk@0 {
+ reg = <0x0 0x03000000>;
+ read-only;
+ };
+
+ diagnostic@3000000 {
+ reg = <0x03000000 0x00e00000>;
+ read-only;
+ };
+
+ dink@3e00000 {
+ reg = <0x03e00000 0x00200000>;
+ read-only;
+ };
+
+ kernel@4000000 {
+ reg = <0x04000000 0x00400000>;
+ read-only;
+ };
+
+ jffs2@4400000 {
+ reg = <0x04400000 0x03b00000>;
+ };
+
+ dtb@7f00000 {
+ reg = <0x07f00000 0x00080000>;
+ read-only;
+ };
+
+ u-boot@7f80000 {
+ reg = <0x07f80000 0x00080000>;
+ read-only;
+ };
+ };
+
+ nand@2,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8572-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x2 0x0 0x40000>;
+
+ u-boot@0 {
+ reg = <0x0 0x02000000>;
+ read-only;
+ };
+
+ jffs2@2000000 {
+ reg = <0x02000000 0x10000000>;
+ };
+
+ ramdisk@12000000 {
+ reg = <0x12000000 0x08000000>;
+ read-only;
+ };
+
+ kernel@1a000000 {
+ reg = <0x1a000000 0x04000000>;
+ };
+
+ dtb@1e000000 {
+ reg = <0x1e000000 0x01000000>;
+ read-only;
+ };
+
+ empty@1f000000 {
+ reg = <0x1f000000 0x21000000>;
+ };
+ };
+
+ nand@4,0 {
+ compatible = "fsl,mpc8572-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x4 0x0 0x40000>;
+ };
+
+ nand@5,0 {
+ compatible = "fsl,mpc8572-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x5 0x0 0x40000>;
+ };
+
+ nand@6,0 {
+ compatible = "fsl,mpc8572-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x6 0x0 0x40000>;
+ };
+ };
+
soc8572@ffe00000 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
new file mode 100644
index 000000000000..c114c4ee9931
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
@@ -0,0 +1,483 @@
+/*
+ * MPC8572 DS Core0 Device Tree Source in CAMP mode.
+ *
+ * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
+ * can be shared; all the other devices must be assigned to one core only.
+ * This dts file allows core0 to have memory, l2, i2c, dma1, global-util, eth0,
+ * eth1, crypto, pci0, pci1.
+ *
+ * Copyright 2007, 2008 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/dts-v1/;
+/ {
+ model = "fsl,MPC8572DS";
+ compatible = "fsl,MPC8572DS", "fsl,MPC8572DS-CAMP";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ aliases {
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+ serial0 = &serial0;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,8572@0 {
+ device_type = "cpu";
+ reg = <0x0>;
+ d-cache-line-size = <32>; // 32 bytes
+ i-cache-line-size = <32>; // 32 bytes
+ d-cache-size = <0x8000>; // L1, 32K
+ i-cache-size = <0x8000>; // L1, 32K
+ timebase-frequency = <0>;
+ bus-frequency = <0>;
+ clock-frequency = <0>;
+ next-level-cache = <&L2>;
+ };
+
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>; // Filled by U-Boot
+ };
+
+ soc8572@ffe00000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+ ranges = <0x0 0xffe00000 0x100000>;
+ reg = <0xffe00000 0x1000>; // CCSRBAR & soc regs, remove once parse code for immrbase fixed
+ bus-frequency = <0>; // Filled out by uboot.
+
+ memory-controller@2000 {
+ compatible = "fsl,mpc8572-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <18 2>;
+ };
+
+ memory-controller@6000 {
+ compatible = "fsl,mpc8572-memory-controller";
+ reg = <0x6000 0x1000>;
+ interrupt-parent = <&mpic>;
+ interrupts = <18 2>;
+ };
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,mpc8572-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x80000>; // L2, 512K
+ interrupt-parent = <&mpic>;
+ interrupts = <16 2>;
+ };
+
+ i2c@3000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <0>;
+ compatible = "fsl-i2c";
+ reg = <0x3000 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ i2c@3100 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cell-index = <1>;
+ compatible = "fsl-i2c";
+ reg = <0x3100 0x100>;
+ interrupts = <43 2>;
+ interrupt-parent = <&mpic>;
+ dfsrr;
+ };
+
+ dma@21300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
+ reg = <0x21300 0x4>;
+ ranges = <0x0 0x21100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,mpc8572-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <20 2>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,mpc8572-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupt-parent = <&mpic>;
+ interrupts = <21 2>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,mpc8572-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupt-parent = <&mpic>;
+ interrupts = <22 2>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,mpc8572-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupt-parent = <&mpic>;
+ interrupts = <23 2>;
+ };
+ };
+
+ mdio@24520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-mdio";
+ reg = <0x24520 0x20>;
+
+ phy0: ethernet-phy@0 {
+ interrupt-parent = <&mpic>;
+ interrupts = <10 1>;
+ reg = <0x0>;
+ };
+ phy1: ethernet-phy@1 {
+ interrupt-parent = <&mpic>;
+ interrupts = <10 1>;
+ reg = <0x1>;
+ };
+ };
+
+ enet0: ethernet@24000 {
+ cell-index = <0>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x24000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <29 2 30 2 34 2>;
+ interrupt-parent = <&mpic>;
+ phy-handle = <&phy0>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ enet1: ethernet@25000 {
+ cell-index = <1>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x25000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <35 2 36 2 40 2>;
+ interrupt-parent = <&mpic>;
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ serial0: serial@4500 {
+ cell-index = <0>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4500 0x100>;
+ clock-frequency = <0>;
+ };
+
+ global-utilities@e0000 { //global utilities block
+ compatible = "fsl,mpc8572-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+
+ crypto@30000 {
+ compatible = "fsl,sec3.0", "fsl,sec2.4", "fsl,sec2.2",
+ "fsl,sec2.1", "fsl,sec2.0";
+ reg = <0x30000 0x10000>;
+ interrupts = <45 2 58 2>;
+ interrupt-parent = <&mpic>;
+ fsl,num-channels = <4>;
+ fsl,channel-fifo-len = <24>;
+ fsl,exec-units-mask = <0x9fe>;
+ fsl,descriptor-types-mask = <0x3ab0ebf>;
+ };
+
+ mpic: pic@40000 {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x40000 0x40000>;
+ compatible = "chrp,open-pic";
+ device_type = "open-pic";
+ protected-sources = <
+ 31 32 33 37 38 39 /* enet2 enet3 */
+ 76 77 78 79 27 42 /* dma2 pci2 serial */
+ 0xe0 0xe1 0xe2 0xe3 /* msi */
+ 0xe4 0xe5 0xe6 0xe7
+ >;
+ };
+ };
+
+ pci0: pcie@ffe08000 {
+ cell-index = <0>;
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0xffe08000 0x1000>;
+ bus-range = <0 255>;
+ ranges = <0x2000000 0x0 0x80000000 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x0 0xffc00000 0x0 0x10000>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <24 2>;
+ interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x11 func 0 - PCI slot 1 */
+ 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x11 func 1 - PCI slot 1 */
+ 0x8900 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8900 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8900 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8900 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x11 func 2 - PCI slot 1 */
+ 0x8a00 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8a00 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8a00 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8a00 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x11 func 3 - PCI slot 1 */
+ 0x8b00 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8b00 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8b00 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8b00 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x11 func 4 - PCI slot 1 */
+ 0x8c00 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8c00 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8c00 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8c00 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x11 func 5 - PCI slot 1 */
+ 0x8d00 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8d00 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8d00 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8d00 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x11 func 6 - PCI slot 1 */
+ 0x8e00 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8e00 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8e00 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8e00 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x11 func 7 - PCI slot 1 */
+ 0x8f00 0x0 0x0 0x1 &mpic 0x2 0x1
+ 0x8f00 0x0 0x0 0x2 &mpic 0x3 0x1
+ 0x8f00 0x0 0x0 0x3 &mpic 0x4 0x1
+ 0x8f00 0x0 0x0 0x4 &mpic 0x1 0x1
+
+ /* IDSEL 0x12 func 0 - PCI slot 2 */
+ 0x9000 0x0 0x0 0x1 &mpic 0x3 0x1
+ 0x9000 0x0 0x0 0x2 &mpic 0x4 0x1
+ 0x9000 0x0 0x0 0x3 &mpic 0x1 0x1
+ 0x9000 0x0 0x0 0x4 &mpic 0x2 0x1
+
+ /* IDSEL 0x12 func 1 - PCI slot 2 */
+ 0x9100 0x0 0x0 0x1 &mpic 0x3 0x1
+ 0x9100 0x0 0x0 0x2 &mpic 0x4 0x1
+ 0x9100 0x0 0x0 0x3 &mpic 0x1 0x1
+ 0x9100 0x0 0x0 0x4 &mpic 0x2 0x1
+
+ /* IDSEL 0x12 func 2 - PCI slot 2 */
+ 0x9200 0x0 0x0 0x1 &mpic 0x3 0x1
+ 0x9200 0x0 0x0 0x2 &mpic 0x4 0x1
+ 0x9200 0x0 0x0 0x3 &mpic 0x1 0x1
+ 0x9200 0x0 0x0 0x4 &mpic 0x2 0x1
+
+ /* IDSEL 0x12 func 3 - PCI slot 2 */
+ 0x9300 0x0 0x0 0x1 &mpic 0x3 0x1
+ 0x9300 0x0 0x0 0x2 &mpic 0x4 0x1
+ 0x9300 0x0 0x0 0x3 &mpic 0x1 0x1
+ 0x9300 0x0 0x0 0x4 &mpic 0x2 0x1
+
+ /* IDSEL 0x12 func 4 - PCI slot 2 */
+ 0x9400 0x0 0x0 0x1 &mpic 0x3 0x1
+ 0x9400 0x0 0x0 0x2 &mpic 0x4 0x1
+ 0x9400 0x0 0x0 0x3 &mpic 0x1 0x1
+ 0x9400 0x0 0x0 0x4 &mpic 0x2 0x1
+
+ /* IDSEL 0x12 func 5 - PCI slot 2 */
+ 0x9500 0x0 0x0 0x1 &mpic 0x3 0x1
+ 0x9500 0x0 0x0 0x2 &mpic 0x4 0x1
+ 0x9500 0x0 0x0 0x3 &mpic 0x1 0x1
+ 0x9500 0x0 0x0 0x4 &mpic 0x2 0x1
+
+ /* IDSEL 0x12 func 6 - PCI slot 2 */
+ 0x9600 0x0 0x0 0x1 &mpic 0x3 0x1
+ 0x9600 0x0 0x0 0x2 &mpic 0x4 0x1
+ 0x9600 0x0 0x0 0x3 &mpic 0x1 0x1
+ 0x9600 0x0 0x0 0x4 &mpic 0x2 0x1
+
+ /* IDSEL 0x12 func 7 - PCI slot 2 */
+ 0x9700 0x0 0x0 0x1 &mpic 0x3 0x1
+ 0x9700 0x0 0x0 0x2 &mpic 0x4 0x1
+ 0x9700 0x0 0x0 0x3 &mpic 0x1 0x1
+ 0x9700 0x0 0x0 0x4 &mpic 0x2 0x1
+
+ // IDSEL 0x1c USB
+ 0xe000 0x0 0x0 0x1 &i8259 0xc 0x2
+ 0xe100 0x0 0x0 0x2 &i8259 0x9 0x2
+ 0xe200 0x0 0x0 0x3 &i8259 0xa 0x2
+ 0xe300 0x0 0x0 0x4 &i8259 0xb 0x2
+
+ // IDSEL 0x1d Audio
+ 0xe800 0x0 0x0 0x1 &i8259 0x6 0x2
+
+ // IDSEL 0x1e Legacy
+ 0xf000 0x0 0x0 0x1 &i8259 0x7 0x2
+ 0xf100 0x0 0x0 0x1 &i8259 0x7 0x2
+
+ // IDSEL 0x1f IDE/SATA
+ 0xf800 0x0 0x0 0x1 &i8259 0xe 0x2
+ 0xf900 0x0 0x0 0x1 &i8259 0x5 0x2
+
+ >;
+
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ uli1575@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ ranges = <0x2000000 0x0 0x80000000
+ 0x2000000 0x0 0x80000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ isa@1e {
+ device_type = "isa";
+ #interrupt-cells = <2>;
+ #size-cells = <1>;
+ #address-cells = <2>;
+ reg = <0xf000 0x0 0x0 0x0 0x0>;
+ ranges = <0x1 0x0 0x1000000 0x0 0x0
+ 0x1000>;
+ interrupt-parent = <&i8259>;
+
+ i8259: interrupt-controller@20 {
+ reg = <0x1 0x20 0x2
+ 0x1 0xa0 0x2
+ 0x1 0x4d0 0x2>;
+ interrupt-controller;
+ device_type = "interrupt-controller";
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ compatible = "chrp,iic";
+ interrupts = <9 2>;
+ interrupt-parent = <&mpic>;
+ };
+
+ i8042@60 {
+ #size-cells = <0>;
+ #address-cells = <1>;
+ reg = <0x1 0x60 0x1 0x1 0x64 0x1>;
+ interrupts = <1 3 12 3>;
+ interrupt-parent =
+ <&i8259>;
+
+ keyboard@0 {
+ reg = <0x0>;
+ compatible = "pnpPNP,303";
+ };
+
+ mouse@1 {
+ reg = <0x1>;
+ compatible = "pnpPNP,f03";
+ };
+ };
+
+ rtc@70 {
+ compatible = "pnpPNP,b00";
+ reg = <0x1 0x70 0x2>;
+ };
+
+ gpio@400 {
+ reg = <0x1 0x400 0x80>;
+ };
+ };
+ };
+ };
+
+ };
+
+ pci1: pcie@ffe09000 {
+ cell-index = <1>;
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0xffe09000 0x1000>;
+ bus-range = <0 255>;
+ ranges = <0x2000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x0 0xffc10000 0x0 0x10000>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <26 2>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x4 0x1
+ 0000 0x0 0x0 0x2 &mpic 0x5 0x1
+ 0000 0x0 0x0 0x3 &mpic 0x6 0x1
+ 0000 0x0 0x0 0x4 &mpic 0x7 0x1
+ >;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ ranges = <0x2000000 0x0 0xa0000000
+ 0x2000000 0x0 0xa0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
new file mode 100644
index 000000000000..04ecda18d206
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
@@ -0,0 +1,234 @@
+/*
+ * MPC8572 DS Core1 Device Tree Source in CAMP mode.
+ *
+ * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
+ * can be shared; all the other devices must be assigned to one core only.
+ * This dts allows core1 to have l2, dma2, eth2, eth3, pci2, msi.
+ *
+ * Please note that "-b 1" must be added when compiling core1's dts.
+ *
+ * Copyright 2007, 2008 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/dts-v1/;
+/ {
+ model = "fsl,MPC8572DS";
+ compatible = "fsl,MPC8572DS", "fsl,MPC8572DS-CAMP";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ aliases {
+ ethernet2 = &enet2;
+ ethernet3 = &enet3;
+ serial0 = &serial0;
+ pci2 = &pci2;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,8572@1 {
+ device_type = "cpu";
+ reg = <0x1>;
+ d-cache-line-size = <32>; // 32 bytes
+ i-cache-line-size = <32>; // 32 bytes
+ d-cache-size = <0x8000>; // L1, 32K
+ i-cache-size = <0x8000>; // L1, 32K
+ timebase-frequency = <0>;
+ bus-frequency = <0>;
+ clock-frequency = <0>;
+ next-level-cache = <&L2>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0>; // Filled by U-Boot
+ };
+
+ soc8572@ffe00000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "simple-bus";
+ ranges = <0x0 0xffe00000 0x100000>;
+ reg = <0xffe00000 0x1000>; // CCSRBAR & soc regs, remove once parse code for immrbase fixed
+ bus-frequency = <0>; // Filled out by uboot.
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,mpc8572-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x80000>; // L2, 512K
+ interrupt-parent = <&mpic>;
+ };
+
+ dma@c300 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,mpc8572-dma", "fsl,eloplus-dma";
+ reg = <0xc300 0x4>;
+ ranges = <0x0 0xc100 0x200>;
+ cell-index = <0>;
+ dma-channel@0 {
+ compatible = "fsl,mpc8572-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x0 0x80>;
+ cell-index = <0>;
+ interrupt-parent = <&mpic>;
+ interrupts = <76 2>;
+ };
+ dma-channel@80 {
+ compatible = "fsl,mpc8572-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x80 0x80>;
+ cell-index = <1>;
+ interrupt-parent = <&mpic>;
+ interrupts = <77 2>;
+ };
+ dma-channel@100 {
+ compatible = "fsl,mpc8572-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x100 0x80>;
+ cell-index = <2>;
+ interrupt-parent = <&mpic>;
+ interrupts = <78 2>;
+ };
+ dma-channel@180 {
+ compatible = "fsl,mpc8572-dma-channel",
+ "fsl,eloplus-dma-channel";
+ reg = <0x180 0x80>;
+ cell-index = <3>;
+ interrupt-parent = <&mpic>;
+ interrupts = <79 2>;
+ };
+ };
+
+ mdio@24520 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,gianfar-mdio";
+ reg = <0x24520 0x20>;
+
+ phy2: ethernet-phy@2 {
+ interrupt-parent = <&mpic>;
+ reg = <0x2>;
+ };
+ phy3: ethernet-phy@3 {
+ interrupt-parent = <&mpic>;
+ reg = <0x3>;
+ };
+ };
+
+ enet2: ethernet@26000 {
+ cell-index = <2>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x26000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <31 2 32 2 33 2>;
+ interrupt-parent = <&mpic>;
+ phy-handle = <&phy2>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ enet3: ethernet@27000 {
+ cell-index = <3>;
+ device_type = "network";
+ model = "eTSEC";
+ compatible = "gianfar";
+ reg = <0x27000 0x1000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <37 2 38 2 39 2>;
+ interrupt-parent = <&mpic>;
+ phy-handle = <&phy3>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ msi@41600 {
+ compatible = "fsl,mpc8572-msi", "fsl,mpic-msi";
+ reg = <0x41600 0x80>;
+ msi-available-ranges = <0 0x100>;
+ interrupts = <
+ 0xe0 0
+ 0xe1 0
+ 0xe2 0
+ 0xe3 0
+ 0xe4 0
+ 0xe5 0
+ 0xe6 0
+ 0xe7 0>;
+ interrupt-parent = <&mpic>;
+ };
+
+ serial0: serial@4600 {
+ cell-index = <1>;
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x4600 0x100>;
+ clock-frequency = <0>;
+ };
+
+ mpic: pic@40000 {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0x40000 0x40000>;
+ compatible = "chrp,open-pic";
+ device_type = "open-pic";
+ protected-sources = <
+ 18 16 10 42 45 58 /* MEM L2 mdio serial crypto */
+ 29 30 34 35 36 40 /* enet0 enet1 */
+ 24 26 20 21 22 23 /* pcie0 pcie1 dma1 */
+ 43 /* i2c */
+ 0x1 0x2 0x3 0x4 /* pci slot */
+ 0x9 0xa 0xb 0xc /* usb */
+ 0x6 0x7 0xe 0x5 /* Audio legacy SATA */
+ >;
+ };
+ };
+
+ pci2: pcie@ffe0a000 {
+ cell-index = <2>;
+ compatible = "fsl,mpc8548-pcie";
+ device_type = "pci";
+ #interrupt-cells = <1>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ reg = <0xffe0a000 0x1000>;
+ bus-range = <0 255>;
+ ranges = <0x2000000 0x0 0xc0000000 0xc0000000 0x0 0x20000000
+ 0x1000000 0x0 0x0 0xffc20000 0x0 0x10000>;
+ clock-frequency = <33333333>;
+ interrupt-parent = <&mpic>;
+ interrupts = <27 2>;
+ interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+ interrupt-map = <
+ /* IDSEL 0x0 */
+ 0000 0x0 0x0 0x1 &mpic 0x0 0x1
+ 0000 0x0 0x0 0x2 &mpic 0x1 0x1
+ 0000 0x0 0x0 0x3 &mpic 0x2 0x1
+ 0000 0x0 0x0 0x4 &mpic 0x3 0x1
+ >;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ #size-cells = <2>;
+ #address-cells = <3>;
+ device_type = "pci";
+ ranges = <0x2000000 0x0 0xc0000000
+ 0x2000000 0x0 0xc0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
diff --git a/arch/powerpc/boot/dts/pcm030.dts b/arch/powerpc/boot/dts/pcm030.dts
index 7c1bb952360c..be2c11ca0594 100644
--- a/arch/powerpc/boot/dts/pcm030.dts
+++ b/arch/powerpc/boot/dts/pcm030.dts
@@ -143,7 +143,6 @@
rtc@800 { // Real time clock
compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
- device_type = "rtc";
reg = <0x800 0x100>;
interrupts = <0x1 0x5 0x0 0x1 0x6 0x0>;
interrupt-parent = <&mpc5200_pic>;
@@ -301,7 +300,6 @@
interrupt-parent = <&mpc5200_pic>;
fsl5200-clocking;
rtc@51 {
- device_type = "rtc";
compatible = "nxp,pcf8563";
reg = <0x51>;
};
diff --git a/arch/powerpc/boot/dts/tqm5200.dts b/arch/powerpc/boot/dts/tqm5200.dts
index 3008bf8830c1..906302e26a62 100644
--- a/arch/powerpc/boot/dts/tqm5200.dts
+++ b/arch/powerpc/boot/dts/tqm5200.dts
@@ -181,7 +181,6 @@
fsl5200-clocking;
rtc@68 {
- device_type = "rtc";
compatible = "dallas,ds1307";
reg = <0x68>;
};
diff --git a/arch/powerpc/boot/libfdt-wrapper.c b/arch/powerpc/boot/libfdt-wrapper.c
index 9276327bc2bb..bb8b9b3505ee 100644
--- a/arch/powerpc/boot/libfdt-wrapper.c
+++ b/arch/powerpc/boot/libfdt-wrapper.c
@@ -185,7 +185,7 @@ void fdt_init(void *blob)
/* Make sure the dt blob is the right version and so forth */
fdt = blob;
- bufsize = fdt_totalsize(fdt) + 4;
+ bufsize = fdt_totalsize(fdt) + EXPAND_GRANULARITY;
buf = malloc(bufsize);
if(!buf)
fatal("malloc failed. can't relocate the device tree\n\r");
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
index 07ccaf89f379..cd1ffa449327 100644
--- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
@@ -1397,8 +1397,11 @@ CONFIG_USB_STORAGE=y
# CONFIG_ACCESSIBILITY is not set
# CONFIG_INFINIBAND is not set
# CONFIG_EDAC is not set
-CONFIG_RTC_LIB=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
#
# RTC interfaces
@@ -1424,6 +1427,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_M41T80 is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
+CONFIG_RTC_DRV_RX8581=y
#
# SPI RTC drivers
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index cfc94cfcf4cb..034a1fbdc887 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -267,7 +267,7 @@ CONFIG_PCI_SYSCALL=y
# CONFIG_PCIEPORTBUS is not set
CONFIG_ARCH_SUPPORTS_MSI=y
# CONFIG_PCI_MSI is not set
-CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_LEGACY is not set
# CONFIG_PCI_DEBUG is not set
# CONFIG_PCCARD is not set
# CONFIG_HOTPLUG_PCI is not set
@@ -354,7 +354,7 @@ CONFIG_IPV6_NDISC_NODETYPE=y
# CONFIG_IP_SCTP is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
-# CONFIG_BRIDGE is not set
+CONFIG_BRIDGE=m
# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
@@ -579,7 +579,7 @@ CONFIG_NETDEVICES=y
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
-# CONFIG_TUN is not set
+CONFIG_TUN=m
# CONFIG_VETH is not set
# CONFIG_ARCNET is not set
# CONFIG_PHYLIB is not set
@@ -1001,11 +1001,11 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_TMC is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
#
#
-# may also be needed; see USB_STORAGE Help for more information
+# see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=m
# CONFIG_USB_STORAGE_DEBUG is not set
@@ -1418,6 +1418,6 @@ CONFIG_CRYPTO_LZO=m
# CONFIG_PPC_CLOCK is not set
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
-CONFIG_KVM_BOOKE_HOST=y
+CONFIG_KVM_440=y
# CONFIG_VIRTIO_PCI is not set
# CONFIG_VIRTIO_BALLOON is not set
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index f3fc733758f5..499be5bdd6fa 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -111,7 +111,7 @@ static __inline__ void atomic_inc(atomic_t *v)
bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
- : "cc");
+ : "cc", "xer");
}
static __inline__ int atomic_inc_return(atomic_t *v)
@@ -128,7 +128,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
ISYNC_ON_SMP
: "=&r" (t)
: "r" (&v->counter)
- : "cc", "memory");
+ : "cc", "xer", "memory");
return t;
}
@@ -155,7 +155,7 @@ static __inline__ void atomic_dec(atomic_t *v)
bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
- : "cc");
+ : "cc", "xer");
}
static __inline__ int atomic_dec_return(atomic_t *v)
@@ -172,7 +172,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
ISYNC_ON_SMP
: "=&r" (t)
: "r" (&v->counter)
- : "cc", "memory");
+ : "cc", "xer", "memory");
return t;
}
@@ -346,7 +346,7 @@ static __inline__ void atomic64_inc(atomic64_t *v)
bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
- : "cc");
+ : "cc", "xer");
}
static __inline__ long atomic64_inc_return(atomic64_t *v)
@@ -362,7 +362,7 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
ISYNC_ON_SMP
: "=&r" (t)
: "r" (&v->counter)
- : "cc", "memory");
+ : "cc", "xer", "memory");
return t;
}
@@ -388,7 +388,7 @@ static __inline__ void atomic64_dec(atomic64_t *v)
bne- 1b"
: "=&r" (t), "+m" (v->counter)
: "r" (&v->counter)
- : "cc");
+ : "cc", "xer");
}
static __inline__ long atomic64_dec_return(atomic64_t *v)
@@ -404,7 +404,7 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
ISYNC_ON_SMP
: "=&r" (t)
: "r" (&v->counter)
- : "cc", "memory");
+ : "cc", "xer", "memory");
return t;
}
@@ -431,7 +431,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
"\n\
2:" : "=&r" (t)
: "r" (&v->counter)
- : "cc", "memory");
+ : "cc", "xer", "memory");
return t;
}
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index e55d1f66b86f..64e1fdca233e 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -3,6 +3,7 @@
#ifdef __KERNEL__
#include <asm/asm-compat.h>
+
/*
* Define an illegal instr to trap on the bug.
* We don't use 0 because that marks the end of a function
@@ -14,6 +15,7 @@
#ifdef CONFIG_BUG
#ifdef __ASSEMBLY__
+#include <asm/asm-offsets.h>
#ifdef CONFIG_DEBUG_BUGVERBOSE
.macro EMIT_BUG_ENTRY addr,file,line,flags
.section __bug_table,"a"
@@ -26,7 +28,7 @@
.previous
.endm
#else
- .macro EMIT_BUG_ENTRY addr,file,line,flags
+.macro EMIT_BUG_ENTRY addr,file,line,flags
.section __bug_table,"a"
5001: PPC_LONG \addr
.short \flags
@@ -113,6 +115,13 @@
#define HAVE_ARCH_BUG_ON
#define HAVE_ARCH_WARN_ON
#endif /* __ASSEMBLY __ */
+#else
+#ifdef __ASSEMBLY__
+.macro EMIT_BUG_ENTRY addr,file,line,flags
+.endm
+#else /* !__ASSEMBLY__ */
+#define _EMIT_BUG_ENTRY
+#endif
#endif /* CONFIG_BUG */
#include <asm-generic/bug.h>
diff --git a/arch/powerpc/include/asm/byteorder.h b/arch/powerpc/include/asm/byteorder.h
index b37752214a16..d5de325472e9 100644
--- a/arch/powerpc/include/asm/byteorder.h
+++ b/arch/powerpc/include/asm/byteorder.h
@@ -11,6 +11,8 @@
#include <asm/types.h>
#include <linux/compiler.h>
+#define __BIG_ENDIAN
+
#ifdef __GNUC__
#ifdef __KERNEL__
@@ -21,12 +23,19 @@ static __inline__ __u16 ld_le16(const volatile __u16 *addr)
__asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
return val;
}
+#define __arch_swab16p ld_le16
static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
{
__asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
}
+static inline void __arch_swab16s(__u16 *addr)
+{
+ st_le16(addr, *addr);
+}
+#define __arch_swab16s __arch_swab16s
+
static __inline__ __u32 ld_le32(const volatile __u32 *addr)
{
__u32 val;
@@ -34,13 +43,20 @@ static __inline__ __u32 ld_le32(const volatile __u32 *addr)
__asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
return val;
}
+#define __arch_swab32p ld_le32
static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
{
__asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
}
-static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
+static inline void __arch_swab32s(__u32 *addr)
+{
+ st_le32(addr, *addr);
+}
+#define __arch_swab32s __arch_swab32s
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 value)
{
__u16 result;
@@ -49,8 +65,9 @@ static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
: "r" (value), "0" (value >> 8));
return result;
}
+#define __arch_swab16 __arch_swab16
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
+static inline __attribute_const__ __u32 __arch_swab32(__u32 value)
{
__u32 result;
@@ -61,29 +78,16 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
: "r" (value), "0" (value >> 24));
return result;
}
-
-#define __arch__swab16(x) ___arch__swab16(x)
-#define __arch__swab32(x) ___arch__swab32(x)
-
-/* The same, but returns converted value from the location pointer by addr. */
-#define __arch__swab16p(addr) ld_le16(addr)
-#define __arch__swab32p(addr) ld_le32(addr)
-
-/* The same, but do the conversion in situ, ie. put the value back to addr. */
-#define __arch__swab16s(addr) st_le16(addr,*addr)
-#define __arch__swab32s(addr) st_le32(addr,*addr)
+#define __arch_swab32 __arch_swab32
#endif /* __KERNEL__ */
-#ifndef __STRICT_ANSI__
-#define __BYTEORDER_HAS_U64__
#ifndef __powerpc64__
#define __SWAB_64_THRU_32__
#endif /* __powerpc64__ */
-#endif /* __STRICT_ANSI__ */
#endif /* __GNUC__ */
-#include <linux/byteorder/big_endian.h>
+#include <linux/byteorder.h>
#endif /* _ASM_POWERPC_BYTEORDER_H */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 1e94b07a020e..4911104791c3 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -82,6 +82,7 @@ struct cpu_spec {
char *cpu_name;
unsigned long cpu_features; /* Kernel features */
unsigned int cpu_user_features; /* Userland features */
+ unsigned int mmu_features; /* MMU features */
/* cache line sizes */
unsigned int icache_bsize;
@@ -144,17 +145,14 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_USE_TB ASM_CONST(0x0000000000000040)
#define CPU_FTR_L2CSR ASM_CONST(0x0000000000000080)
#define CPU_FTR_601 ASM_CONST(0x0000000000000100)
-#define CPU_FTR_HPTE_TABLE ASM_CONST(0x0000000000000200)
#define CPU_FTR_CAN_NAP ASM_CONST(0x0000000000000400)
#define CPU_FTR_L3CR ASM_CONST(0x0000000000000800)
#define CPU_FTR_L3_DISABLE_NAP ASM_CONST(0x0000000000001000)
#define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000)
#define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000)
#define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000)
-#define CPU_FTR_HAS_HIGH_BATS ASM_CONST(0x0000000000010000)
#define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000)
#define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000)
-#define CPU_FTR_BIG_PHYS ASM_CONST(0x0000000000080000)
#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000)
#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000)
@@ -163,6 +161,8 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_SPE ASM_CONST(0x0000000002000000)
#define CPU_FTR_NEED_PAIRED_STWCX ASM_CONST(0x0000000004000000)
#define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000)
+#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000010000000)
+#define CPU_FTR_INDEXED_DCR ASM_CONST(0x0000000020000000)
/*
* Add the 64-bit processor unique features in the top half of the word;
@@ -177,7 +177,6 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_SLB LONG_ASM_CONST(0x0000000100000000)
#define CPU_FTR_16M_PAGE LONG_ASM_CONST(0x0000000200000000)
#define CPU_FTR_TLBIEL LONG_ASM_CONST(0x0000000400000000)
-#define CPU_FTR_NOEXECUTE LONG_ASM_CONST(0x0000000800000000)
#define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000)
#define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000)
#define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000)
@@ -194,6 +193,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000)
#define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000)
#define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000)
+#define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000)
#ifndef __ASSEMBLY__
@@ -264,164 +264,159 @@ extern const char *powerpc_base_platform;
!defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
!defined(CONFIG_BOOKE))
-#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE | \
+#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE)
#define CPU_FTRS_603 (CPU_FTR_COMMON | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
#define CPU_FTRS_604 (CPU_FTR_COMMON | \
- CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_PPC_LE)
+ CPU_FTR_USE_TB | CPU_FTR_PPC_LE)
#define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
#define CPU_FTRS_740 (CPU_FTR_COMMON | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
- CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
+ CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \
CPU_FTR_PPC_LE)
#define CPU_FTRS_750 (CPU_FTR_COMMON | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
- CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
+ CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \
CPU_FTR_PPC_LE)
-#define CPU_FTRS_750CL (CPU_FTRS_750 | CPU_FTR_HAS_HIGH_BATS)
+#define CPU_FTRS_750CL (CPU_FTRS_750)
#define CPU_FTRS_750FX1 (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM)
#define CPU_FTRS_750FX2 (CPU_FTRS_750 | CPU_FTR_NO_DPM)
-#define CPU_FTRS_750FX (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | \
- CPU_FTR_HAS_HIGH_BATS)
+#define CPU_FTRS_750FX (CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX)
#define CPU_FTRS_750GX (CPU_FTRS_750FX)
#define CPU_FTRS_7400_NOTAU (CPU_FTR_COMMON | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
- CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \
+ CPU_FTR_ALTIVEC_COMP | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
#define CPU_FTRS_7400 (CPU_FTR_COMMON | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
- CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \
+ CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
#define CPU_FTRS_7450_20 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7450_21 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7450_23 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
#define CPU_FTRS_7455_1 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS | \
- CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
+ CPU_FTR_SPEC7450 | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
#define CPU_FTRS_7455_20 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
- CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
+ CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
#define CPU_FTRS_7455 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7447_10 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE | \
CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7447 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7447A (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_7448 (CPU_FTR_COMMON | \
CPU_FTR_USE_TB | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
- CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_82XX (CPU_FTR_COMMON | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB)
#define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
- CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS)
+ CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP)
#define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \
- CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \
CPU_FTR_COMMON)
#define CPU_FTRS_E300C2 (CPU_FTR_MAYBE_CAN_DOZE | \
- CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
+ CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \
CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE)
-#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | \
- CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE)
+#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | CPU_FTR_USE_TB)
#define CPU_FTRS_8XX (CPU_FTR_USE_TB)
-#define CPU_FTRS_40X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN)
-#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN)
+#define CPU_FTRS_40X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_44X (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_440x6 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
+ CPU_FTR_INDEXED_DCR)
#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
- CPU_FTR_UNIFIED_ID_CACHE)
+ CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
- CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN)
+ CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_NOEXECUTE)
#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
- CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | \
- CPU_FTR_NODSISRALIGN)
+ CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
+ CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN | \
- CPU_FTR_L2CSR | CPU_FTR_LWSYNC)
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
#define CPU_FTRS_POWER3 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE)
+ CPU_FTR_IABR | CPU_FTR_PPC_LE)
#define CPU_FTRS_RS64 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \
+ CPU_FTR_IABR | \
CPU_FTR_MMCRA | CPU_FTR_CTRL)
#define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ)
#define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
CPU_FTR_CP_USE_DCBTZ)
#define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
CPU_FTR_PURR)
#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
- CPU_FTR_DSCR)
+ CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD)
#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR | CPU_FTR_SAO)
#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | \
- CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ)
+ CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
+ CPU_FTR_UNALIGNED_LD_STD)
#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
+ CPU_FTR_PPCAS_ARCH_V2 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B)
-#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | \
- CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
+#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
#ifdef __powerpc64__
#define CPU_FTRS_POSSIBLE \
@@ -452,7 +447,7 @@ enum {
CPU_FTRS_40X |
#endif
#ifdef CONFIG_44x
- CPU_FTRS_44X |
+ CPU_FTRS_44X | CPU_FTRS_440x6 |
#endif
#ifdef CONFIG_E200
CPU_FTRS_E200 |
@@ -492,7 +487,7 @@ enum {
CPU_FTRS_40X &
#endif
#ifdef CONFIG_44x
- CPU_FTRS_44X &
+ CPU_FTRS_44X & CPU_FTRS_440x6 &
#endif
#ifdef CONFIG_E200
CPU_FTRS_E200 &
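A hedged sketch of how the new mmu_features word in struct cpu_spec is meant to be populated alongside the trimmed cpu_features masks; the field values below are illustrative, not copied from the real cputable entries, and MMU_FTR_TYPE_44x comes from the mmu.h hunk later in this patch.

/* Illustrative only: a cpu_spec entry carrying both feature words. */
static struct cpu_spec example_440_spec = {
	.cpu_name		= "440 (example)",
	.cpu_features		= CPU_FTRS_44X,
	.cpu_user_features	= PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
	.mmu_features		= MMU_FTR_TYPE_44x,	/* new field */
	.icache_bsize		= 32,
	.dcache_bsize		= 32,
};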
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
index 72d2b72c7390..7d2e6235726d 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -23,6 +23,7 @@
#ifndef __ASSEMBLY__
#include <linux/spinlock.h>
+#include <asm/cputable.h>
typedef struct {
unsigned int base;
@@ -39,23 +40,45 @@ static inline bool dcr_map_ok_native(dcr_host_native_t host)
#define dcr_read_native(host, dcr_n) mfdcr(dcr_n + host.base)
#define dcr_write_native(host, dcr_n, value) mtdcr(dcr_n + host.base, value)
-/* Device Control Registers */
-void __mtdcr(int reg, unsigned int val);
-unsigned int __mfdcr(int reg);
+/* Table based DCR accessors */
+extern void __mtdcr(unsigned int reg, unsigned int val);
+extern unsigned int __mfdcr(unsigned int reg);
+
+/* mfdcrx/mtdcrx instruction based accessors. We hand code
+ * the opcodes in order not to depend on newer binutils
+ */
+static inline unsigned int mfdcrx(unsigned int reg)
+{
+ unsigned int ret;
+ asm volatile(".long 0x7c000206 | (%0 << 21) | (%1 << 16)"
+ : "=r" (ret) : "r" (reg));
+ return ret;
+}
+
+static inline void mtdcrx(unsigned int reg, unsigned int val)
+{
+ asm volatile(".long 0x7c000306 | (%0 << 21) | (%1 << 16)"
+ : : "r" (val), "r" (reg));
+}
+
#define mfdcr(rn) \
({unsigned int rval; \
- if (__builtin_constant_p(rn)) \
+ if (__builtin_constant_p(rn) && rn < 1024) \
asm volatile("mfdcr %0," __stringify(rn) \
: "=r" (rval)); \
+ else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
+ rval = mfdcrx(rn); \
else \
rval = __mfdcr(rn); \
rval;})
#define mtdcr(rn, v) \
do { \
- if (__builtin_constant_p(rn)) \
+ if (__builtin_constant_p(rn) && rn < 1024) \
asm volatile("mtdcr " __stringify(rn) ",%0" \
: : "r" (v)); \
+ else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
+ mtdcrx(rn, v); \
else \
__mtdcr(rn, v); \
} while (0)
@@ -69,8 +92,13 @@ static inline unsigned __mfdcri(int base_addr, int base_data, int reg)
unsigned int val;
spin_lock_irqsave(&dcr_ind_lock, flags);
- __mtdcr(base_addr, reg);
- val = __mfdcr(base_data);
+ if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+ mtdcrx(base_addr, reg);
+ val = mfdcrx(base_data);
+ } else {
+ __mtdcr(base_addr, reg);
+ val = __mfdcr(base_data);
+ }
spin_unlock_irqrestore(&dcr_ind_lock, flags);
return val;
}
@@ -81,8 +109,13 @@ static inline void __mtdcri(int base_addr, int base_data, int reg,
unsigned long flags;
spin_lock_irqsave(&dcr_ind_lock, flags);
- __mtdcr(base_addr, reg);
- __mtdcr(base_data, val);
+ if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+ mtdcrx(base_addr, reg);
+ mtdcrx(base_data, val);
+ } else {
+ __mtdcr(base_addr, reg);
+ __mtdcr(base_data, val);
+ }
spin_unlock_irqrestore(&dcr_ind_lock, flags);
}
@@ -93,9 +126,15 @@ static inline void __dcri_clrset(int base_addr, int base_data, int reg,
unsigned int val;
spin_lock_irqsave(&dcr_ind_lock, flags);
- __mtdcr(base_addr, reg);
- val = (__mfdcr(base_data) & ~clr) | set;
- __mtdcr(base_data, val);
+ if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+ mtdcrx(base_addr, reg);
+ val = (mfdcrx(base_data) & ~clr) | set;
+ mtdcrx(base_data, val);
+ } else {
+ __mtdcr(base_addr, reg);
+ val = (__mfdcr(base_data) & ~clr) | set;
+ __mtdcr(base_data, val);
+ }
spin_unlock_irqrestore(&dcr_ind_lock, flags);
}
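The reworked mfdcr()/mtdcr() macros now pick one of three paths: an inline mfdcr/mtdcr instruction for compile-time-constant DCR numbers below 1024, the hand-coded mfdcrx/mtdcrx form when the CPU has CPU_FTR_INDEXED_DCR, and the out-of-line __mfdcr()/__mtdcr() table accessors otherwise. A hedged usage sketch; DCRN_EXAMPLE and the runtime base are made-up values, mirroring how dcr_read_native() above passes a variable register number.

#define DCRN_EXAMPLE	0x0b0	/* constant: compiles to a direct mfdcr */

static unsigned int example_read_dcrs(unsigned int runtime_base)
{
	unsigned int a, b;

	a = mfdcr(DCRN_EXAMPLE);	/* constant < 1024: inline mfdcr */
	b = mfdcr(runtime_base + 4);	/* variable: mfdcrx or __mfdcr() */
	mtdcr(runtime_base + 8, a | b);	/* same selection on the write path */
	return a + b;
}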
diff --git a/arch/powerpc/include/asm/dcr.h b/arch/powerpc/include/asm/dcr.h
index d13fb68bb5c0..9d6851cfb841 100644
--- a/arch/powerpc/include/asm/dcr.h
+++ b/arch/powerpc/include/asm/dcr.h
@@ -68,9 +68,9 @@ typedef dcr_host_mmio_t dcr_host_t;
* additional helpers to read the DCR * base from the device-tree
*/
struct device_node;
-extern unsigned int dcr_resource_start(struct device_node *np,
+extern unsigned int dcr_resource_start(const struct device_node *np,
unsigned int index);
-extern unsigned int dcr_resource_len(struct device_node *np,
+extern unsigned int dcr_resource_len(const struct device_node *np,
unsigned int index);
#endif /* CONFIG_PPC_DCR */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index dfd504caccc1..7d2277cef09a 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -18,4 +18,16 @@ struct dev_archdata {
void *dma_data;
};
+static inline void dev_archdata_set_node(struct dev_archdata *ad,
+ struct device_node *np)
+{
+ ad->of_node = np;
+}
+
+static inline struct device_node *
+dev_archdata_get_node(const struct dev_archdata *ad)
+{
+ return ad->of_node;
+}
+
#endif /* _ASM_POWERPC_DEVICE_H */
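A minimal, hedged usage sketch of the new accessors; 'archdata' is the generic struct device member these helpers wrap, and the function name is illustrative.

static struct device_node *example_attach_node(struct device *dev,
					       struct device_node *np)
{
	dev_archdata_set_node(&dev->archdata, np);
	return dev_archdata_get_node(&dev->archdata);	/* round-trips np */
}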
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index fddb229bd74f..86cef7ddc8d5 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -60,12 +60,6 @@ struct dma_mapping_ops {
dma_addr_t *dma_handle, gfp_t flag);
void (*free_coherent)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
- dma_addr_t (*map_single)(struct device *dev, void *ptr,
- size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs);
- void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs);
int (*map_sg)(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
struct dma_attrs *attrs);
@@ -82,6 +76,22 @@ struct dma_mapping_ops {
dma_addr_t dma_address, size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs);
+#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
+ void (*sync_single_range_for_cpu)(struct device *hwdev,
+ dma_addr_t dma_handle, unsigned long offset,
+ size_t size,
+ enum dma_data_direction direction);
+ void (*sync_single_range_for_device)(struct device *hwdev,
+ dma_addr_t dma_handle, unsigned long offset,
+ size_t size,
+ enum dma_data_direction direction);
+ void (*sync_sg_for_cpu)(struct device *hwdev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction);
+ void (*sync_sg_for_device)(struct device *hwdev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction);
+#endif
};
/*
@@ -149,10 +159,9 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
}
/*
- * TODO: map_/unmap_single will ideally go away, to be completely
- * replaced by map/unmap_page. Until then, we allow dma_ops to have
- * one or the other, or both by checking to see if the specific
- * function requested exists; and if not, falling back on the other set.
+ * map_/unmap_single actually call through to map/unmap_page now that all the
+ * dma_mapping_ops have been converted over. We just have to get the page and
+ * offset to pass through to map_page
*/
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
void *cpu_addr,
@@ -164,10 +173,6 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
BUG_ON(!dma_ops);
- if (dma_ops->map_single)
- return dma_ops->map_single(dev, cpu_addr, size, direction,
- attrs);
-
return dma_ops->map_page(dev, virt_to_page(cpu_addr),
(unsigned long)cpu_addr % PAGE_SIZE, size,
direction, attrs);
@@ -183,11 +188,6 @@ static inline void dma_unmap_single_attrs(struct device *dev,
BUG_ON(!dma_ops);
- if (dma_ops->unmap_single) {
- dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
- return;
- }
-
dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
}
@@ -201,12 +201,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
BUG_ON(!dma_ops);
- if (dma_ops->map_page)
- return dma_ops->map_page(dev, page, offset, size, direction,
- attrs);
-
- return dma_ops->map_single(dev, page_address(page) + offset, size,
- direction, attrs);
+ return dma_ops->map_page(dev, page, offset, size, direction, attrs);
}
static inline void dma_unmap_page_attrs(struct device *dev,
@@ -219,12 +214,7 @@ static inline void dma_unmap_page_attrs(struct device *dev,
BUG_ON(!dma_ops);
- if (dma_ops->unmap_page) {
- dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
- return;
- }
-
- dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
+ dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
@@ -308,47 +298,107 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}
+#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
static inline void dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
- BUG_ON(direction == DMA_NONE);
- __dma_sync(bus_to_virt(dma_handle), size, direction);
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+ dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+ size, direction);
}
static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
- BUG_ON(direction == DMA_NONE);
- __dma_sync(bus_to_virt(dma_handle), size, direction);
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+ dma_ops->sync_single_range_for_device(dev, dma_handle,
+ 0, size, direction);
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
- struct scatterlist *sg;
- int i;
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
- BUG_ON(direction == DMA_NONE);
+ BUG_ON(!dma_ops);
+ dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nents,
+ enum dma_data_direction direction)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+ dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
- for_each_sg(sgl, sg, nents, i)
- __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+ BUG_ON(!dma_ops);
+ dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+ offset, size, direction);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+ dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+ size, direction);
+}
+#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
+static inline void dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nents,
+ enum dma_data_direction direction)
+{
}
static inline void dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
- struct scatterlist *sg;
- int i;
+}
- BUG_ON(direction == DMA_NONE);
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+}
- for_each_sg(sgl, sg, nents, i)
- __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
}
+#endif
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
@@ -382,22 +432,6 @@ static inline int dma_get_cache_alignment(void)
#endif
}
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- /* just sync everything for now */
- dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- /* just sync everything for now */
- dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
-}
-
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
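A hedged restatement of the map_single-to-map_page translation described earlier in this dma-mapping.h diff: the linear-mapping virtual address is split into its page and intra-page offset before reaching the ops->map_page hook, exactly as dma_map_single_attrs() now does. The function name is illustrative.

static dma_addr_t example_map_buffer(struct device *dev, void *buf,
				     size_t size, enum dma_data_direction dir)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	/* same page/offset split as dma_map_single_attrs() above */
	return ops->map_page(dev, virt_to_page(buf),
			     (unsigned long)buf % PAGE_SIZE, size, dir, NULL);
}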
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index b886bec67016..66ea9b8b95c5 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -17,8 +17,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef _PPC64_EEH_H
-#define _PPC64_EEH_H
+#ifndef _POWERPC_EEH_H
+#define _POWERPC_EEH_H
#ifdef __KERNEL__
#include <linux/init.h>
@@ -110,6 +110,7 @@ static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
#define EEH_IO_ERROR_VALUE(size) (-1UL)
#endif /* CONFIG_EEH */
+#ifdef CONFIG_PPC64
/*
* MMIO read/write operations with EEH support.
*/
@@ -207,5 +208,6 @@ static inline void eeh_readsl(const volatile void __iomem *addr, void * buf,
eeh_check_failure(addr, *(u32*)buf);
}
+#endif /* CONFIG_PPC64 */
#endif /* __KERNEL__ */
-#endif /* _PPC64_EEH_H */
+#endif /* _POWERPC_EEH_H */
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index a1029967620b..e4094a5cb05b 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -81,6 +81,36 @@ label##5: \
#define ALT_FTR_SECTION_END_IFCLR(msk) \
ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
+/* MMU feature dependent sections */
+#define BEGIN_MMU_FTR_SECTION_NESTED(label) START_FTR_SECTION(label)
+#define BEGIN_MMU_FTR_SECTION START_FTR_SECTION(97)
+
+#define END_MMU_FTR_SECTION_NESTED(msk, val, label) \
+ FTR_SECTION_ELSE_NESTED(label) \
+ MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup)
+
+#define END_MMU_FTR_SECTION(msk, val) \
+ END_MMU_FTR_SECTION_NESTED(msk, val, 97)
+
+#define END_MMU_FTR_SECTION_IFSET(msk) END_MMU_FTR_SECTION((msk), (msk))
+#define END_MMU_FTR_SECTION_IFCLR(msk) END_MMU_FTR_SECTION((msk), 0)
+
+/* MMU feature sections with alternatives, use BEGIN_FTR_SECTION to start */
+#define MMU_FTR_SECTION_ELSE_NESTED(label) FTR_SECTION_ELSE_NESTED(label)
+#define MMU_FTR_SECTION_ELSE MMU_FTR_SECTION_ELSE_NESTED(97)
+#define ALT_MMU_FTR_SECTION_END_NESTED(msk, val, label) \
+ MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup)
+#define ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, label) \
+ ALT_MMU_FTR_SECTION_END_NESTED(msk, msk, label)
+#define ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, label) \
+ ALT_MMU_FTR_SECTION_END_NESTED(msk, 0, label)
+#define ALT_MMU_FTR_SECTION_END(msk, val) \
+ ALT_MMU_FTR_SECTION_END_NESTED(msk, val, 97)
+#define ALT_MMU_FTR_SECTION_END_IFSET(msk) \
+ ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, 97)
+#define ALT_MMU_FTR_SECTION_END_IFCLR(msk) \
+ ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
+
/* Firmware feature dependent sections */
#define BEGIN_FW_FTR_SECTION_NESTED(label) START_FTR_SECTION(label)
#define BEGIN_FW_FTR_SECTION START_FTR_SECTION(97)
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 91c589520c0a..04e4a620952e 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -38,9 +38,24 @@ extern pte_t *pkmap_page_table;
* easily, subsequent pte tables have to be allocated in one physical
* chunk of RAM.
*/
-#define LAST_PKMAP (1 << PTE_SHIFT)
-#define LAST_PKMAP_MASK (LAST_PKMAP-1)
+/*
+ * With 4K pages we use one full pte table. With 16K/64K pages the pte
+ * table covers enough memory (32MB and 512MB respectively) that both
+ * FIXMAP and PKMAP can be placed in a single pte table; we use 1024
+ * pages for PKMAP in the 16K/64K case.
+ */
+#ifdef CONFIG_PPC_4K_PAGES
+#define PKMAP_ORDER PTE_SHIFT
+#else
+#define PKMAP_ORDER 10
+#endif
+#define LAST_PKMAP (1 << PKMAP_ORDER)
+#ifndef CONFIG_PPC_4K_PAGES
+#define PKMAP_BASE (FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1))
+#else
#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
+#endif
+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
@@ -85,7 +100,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro
BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
- flush_tlb_page(NULL, vaddr);
+ local_flush_tlb_page(NULL, vaddr);
return (void*) vaddr;
}
@@ -113,7 +128,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
* this pte without first remap it
*/
pte_clear(&init_mm, vaddr, kmap_pte-idx);
- flush_tlb_page(NULL, vaddr);
+ local_flush_tlb_page(NULL, vaddr);
#endif
pagefault_enable();
}
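Worked numbers behind the new PKMAP sizing comment in this hunk; the 16K/64K figures assume 64-bit PTEs (as the 32MB/512MB numbers in the comment imply), and are only meant to show the arithmetic.

/*
 * Hedged arithmetic:
 *   4K pages : PKMAP_ORDER = PTE_SHIFT, so LAST_PKMAP is one full pte page
 *              (1024 entries / 4MB with 32-bit PTEs, 512 / 2MB with 64-bit)
 *   16K pages: PKMAP_ORDER = 10 -> 1024 pages = 16MB of pkmap, inside the
 *              32MB a single (64-bit PTE) pte page covers
 *   64K pages: PKMAP_ORDER = 10 -> 1024 pages = 64MB of pkmap, inside the
 *              512MB a single (64-bit PTE) pte page covers
 */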
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 08266d2728b3..494cd8b0a278 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -713,13 +713,6 @@ static inline void * phys_to_virt(unsigned long address)
*/
#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-/* We do NOT want virtual merging, it would put too much pressure on
- * our iommu allocator. Instead, we want drivers to be smart enough
- * to coalesce sglists that happen to have been mapped in a contiguous
- * way by the iommu
- */
-#define BIO_VMERGE_BOUNDARY 0
-
/*
* 32 bits still uses virt_to_bus() for it's implementation of DMA
* mappings se we have to keep it defined here. We also have some old
diff --git a/arch/powerpc/include/asm/kdump.h b/arch/powerpc/include/asm/kdump.h
index b07ebb9784d3..5ebfe5d3c61f 100644
--- a/arch/powerpc/include/asm/kdump.h
+++ b/arch/powerpc/include/asm/kdump.h
@@ -1,6 +1,8 @@
#ifndef _PPC64_KDUMP_H
#define _PPC64_KDUMP_H
+#include <asm/page.h>
+
/* Kdump kernel runs at 32 MB, change at your peril. */
#define KDUMP_KERNELBASE 0x2000000
@@ -11,8 +13,19 @@
#ifdef CONFIG_CRASH_DUMP
+/*
+ * On PPC64 translation is disabled during trampoline setup, so we use
+ * physical addresses. On PPC32, however, translation is already enabled,
+ * so we can't do the same. Luckily create_trampoline() creates relative
+ * branches, so we can just add the PAGE_OFFSET and don't worry about it.
+ */
+#ifdef __powerpc64__
#define KDUMP_TRAMPOLINE_START 0x0100
#define KDUMP_TRAMPOLINE_END 0x3000
+#else
+#define KDUMP_TRAMPOLINE_START (0x0100 + PAGE_OFFSET)
+#define KDUMP_TRAMPOLINE_END (0x3000 + PAGE_OFFSET)
+#endif /* __powerpc64__ */
#define KDUMP_MIN_TCE_ENTRIES 2048
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 3736d9b33289..6dbffc981702 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -33,12 +33,12 @@
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
+#include <asm/reg.h>
typedef void (*crash_shutdown_t)(void);
#ifdef CONFIG_KEXEC
-#ifdef __powerpc64__
/*
* This function is responsible for capturing register states if coming
* via panic or invoking dump using sysrq-trigger.
@@ -48,6 +48,7 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
{
if (oldregs)
memcpy(newregs, oldregs, sizeof(*newregs));
+#ifdef __powerpc64__
else {
/* FIXME Merge this with xmon_save_regs ?? */
unsigned long tmp1, tmp2;
@@ -100,15 +101,11 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
: "b" (newregs)
: "memory");
}
-}
#else
-/*
- * Provide a dummy definition to avoid build failures. Will remain
- * empty till crash dump support is enabled.
- */
-static inline void crash_setup_regs(struct pt_regs *newregs,
- struct pt_regs *oldregs) { }
-#endif /* !__powerpc64 __ */
+ else
+ ppc_save_regs(newregs);
+#endif /* __powerpc64__ */
+}
extern void kexec_smp_wait(void); /* get and clear naca physid, wait for
master to copy new code to 0 */
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index 612d83276653..84b457a3c1bc 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -67,7 +67,7 @@ static __inline__ long local_inc_return(local_t *l)
bne- 1b"
: "=&r" (t)
: "r" (&(l->a.counter))
- : "cc", "memory");
+ : "cc", "xer", "memory");
return t;
}
@@ -94,7 +94,7 @@ static __inline__ long local_dec_return(local_t *l)
bne- 1b"
: "=&r" (t)
: "r" (&(l->a.counter))
- : "cc", "memory");
+ : "cc", "xer", "memory");
return t;
}
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 2fe268b10333..25aaa97facd8 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -133,7 +133,8 @@ struct lppaca {
//=============================================================================
// CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data
//=============================================================================
- u8 pmc_save_area[256]; // PMC interrupt Area x00-xFF
+ u32 page_ins; // CMO Hint - # page ins by OS x00-x04
+ u8 pmc_save_area[252]; // PMC interrupt Area x04-xFF
} __attribute__((__aligned__(0x400)));
extern struct lppaca lppaca[];
diff --git a/arch/powerpc/include/asm/mmu-40x.h b/arch/powerpc/include/asm/mmu-40x.h
index 3d108676584c..776f415a36aa 100644
--- a/arch/powerpc/include/asm/mmu-40x.h
+++ b/arch/powerpc/include/asm/mmu-40x.h
@@ -54,8 +54,9 @@
#ifndef __ASSEMBLY__
typedef struct {
- unsigned long id;
- unsigned long vdso_base;
+ unsigned int id;
+ unsigned int active;
+ unsigned long vdso_base;
} mm_context_t;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index a825524c981a..8a97cfb08b7e 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -4,6 +4,8 @@
* PPC440 support
*/
+#include <asm/page.h>
+
#define PPC44x_MMUCR_TID 0x000000ff
#define PPC44x_MMUCR_STS 0x00010000
@@ -56,8 +58,9 @@
extern unsigned int tlb_44x_hwater;
typedef struct {
- unsigned long id;
- unsigned long vdso_base;
+ unsigned int id;
+ unsigned int active;
+ unsigned long vdso_base;
} mm_context_t;
#endif /* !__ASSEMBLY__ */
@@ -73,4 +76,19 @@ typedef struct {
/* Size of the TLBs used for pinning in lowmem */
#define PPC_PIN_SIZE (1 << 28) /* 256M */
+#if (PAGE_SHIFT == 12)
+#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
+#elif (PAGE_SHIFT == 14)
+#define PPC44x_TLBE_SIZE PPC44x_TLB_16K
+#elif (PAGE_SHIFT == 16)
+#define PPC44x_TLBE_SIZE PPC44x_TLB_64K
+#else
+#error "Unsupported PAGE_SIZE"
+#endif
+
+#define PPC44x_PGD_OFF_SHIFT (32 - PGDIR_SHIFT + PGD_T_LOG2)
+#define PPC44x_PGD_OFF_MASK_BIT (PGDIR_SHIFT - PGD_T_LOG2)
+#define PPC44x_PTE_ADD_SHIFT (32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2)
+#define PPC44x_PTE_ADD_MASK_BIT (32 - PTE_T_LOG2 - PTE_SHIFT)
+
#endif /* _ASM_POWERPC_MMU_44X_H_ */
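A hedged worked example for the new shift/mask constants, assuming 4K pages, 64-bit PTEs and the usual 32-bit layout (PAGE_SHIFT = 12, PTE_SHIFT = 9, PGDIR_SHIFT = 21, PGD_T_LOG2 = 2, PTE_T_LOG2 = 3); the exact values vary with the configuration, this only shows the arithmetic.

/*
 *   PPC44x_PGD_OFF_SHIFT    = 32 - 21 + 2     = 13
 *   PPC44x_PGD_OFF_MASK_BIT = 21 - 2          = 19
 *   PPC44x_PTE_ADD_SHIFT    = 32 - 21 + 9 + 3 = 23
 *   PPC44x_PTE_ADD_MASK_BIT = 32 - 3 - 9      = 20
 * i.e. rotate/mask amounts for extracting the PGD and PTE indices from a
 * faulting address, presumably for use in the 44x TLB miss assembly.
 */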
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 9db877eb88db..07865a357848 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -137,7 +137,8 @@
#ifndef __ASSEMBLY__
typedef struct {
- unsigned long id;
+ unsigned int id;
+ unsigned int active;
unsigned long vdso_base;
} mm_context_t;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/mmu-fsl-booke.h b/arch/powerpc/include/asm/mmu-fsl-booke.h
index 925d93cf64d8..3f941c0f7e8e 100644
--- a/arch/powerpc/include/asm/mmu-fsl-booke.h
+++ b/arch/powerpc/include/asm/mmu-fsl-booke.h
@@ -40,6 +40,8 @@
#define MAS2_M 0x00000004
#define MAS2_G 0x00000002
#define MAS2_E 0x00000001
+#define MAS2_EPN_MASK(size) (~0 << (2*(size) + 10))
+#define MAS2_VAL(addr, size, flags) ((addr) & MAS2_EPN_MASK(size) | (flags))
#define MAS3_RPN 0xFFFFF000
#define MAS3_U0 0x00000200
@@ -74,8 +76,9 @@
#ifndef __ASSEMBLY__
typedef struct {
- unsigned long id;
- unsigned long vdso_base;
+ unsigned int id;
+ unsigned int active;
+ unsigned long vdso_base;
} mm_context_t;
#endif /* !__ASSEMBLY__ */
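A hedged worked example for the new MAS2 helpers: with the BookE TSIZE encoding (page size = 4^size KB), MAS2_EPN_MASK(size) keeps the address bits above 2*size + 10, and MAS2_VAL() merges in the attribute flags. Only macros visible in this hunk are used.

/*
 *   size = 1 (4KB) : MAS2_EPN_MASK(1) = ~0 << 12 = 0xfffff000
 *   size = 5 (1MB) : MAS2_EPN_MASK(5) = ~0 << 20 = 0xfff00000
 *
 *   MAS2_VAL(0x12345678, 1, MAS2_M | MAS2_G)
 *     = (0x12345678 & 0xfffff000) | 0x06 = 0x12345006
 */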
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 4c0e1b4f975c..6e7639911318 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -2,6 +2,63 @@
#define _ASM_POWERPC_MMU_H_
#ifdef __KERNEL__
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
+/*
+ * MMU features bit definitions
+ */
+
+/*
+ * First half is MMU families
+ */
+#define MMU_FTR_HPTE_TABLE ASM_CONST(0x00000001)
+#define MMU_FTR_TYPE_8xx ASM_CONST(0x00000002)
+#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004)
+#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008)
+#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010)
+
+/*
+ * This is individual features
+ */
+
+/* Enable use of high BAT registers */
+#define MMU_FTR_USE_HIGH_BATS ASM_CONST(0x00010000)
+
+/* Enable >32-bit physical addresses on 32-bit processor, only used
+ * by CONFIG_6xx currently as BookE supports that from day 1
+ */
+#define MMU_FTR_BIG_PHYS ASM_CONST(0x00020000)
+
+/* Enable use of broadcast TLB invalidations. We don't always set it
+ * on processors that support it due to other constraints with the
+ * use of such invalidations
+ */
+#define MMU_FTR_USE_TLBIVAX_BCAST ASM_CONST(0x00040000)
+
+/* Enable use of tlbilx invalidate-by-PID variant.
+ */
+#define MMU_FTR_USE_TLBILX_PID ASM_CONST(0x00080000)
+
+/* This indicates that the processor cannot handle multiple outstanding
+ * broadcast tlbivax or tlbsync. This makes the code use a spinlock
+ * around such invalidate forms.
+ */
+#define MMU_FTR_LOCK_BCAST_INVAL ASM_CONST(0x00100000)
+
+#ifndef __ASSEMBLY__
+#include <asm/cputable.h>
+
+static inline int mmu_has_feature(unsigned long feature)
+{
+ return (cur_cpu_spec->mmu_features & feature);
+}
+
+extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
+
+#endif /* !__ASSEMBLY__ */
+
+
#ifdef CONFIG_PPC64
/* 64-bit classic hash table MMU */
# include <asm/mmu-hash64.h>
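A minimal, hedged sketch of consuming the new MMU feature word from C (the assembly side goes through the __mmu_ftr_fixup section added to feature-fixups.h above); the function and the policy it encodes are illustrative only.

static inline int example_can_bcast_invalidate_unlocked(void)
{
	/* broadcast tlbivax available, and no lock needed around it */
	return mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST) &&
	       !mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
}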
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 6b993ef452ff..ab4f19263c42 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -2,237 +2,26 @@
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm-generic/mm_hooks.h>
-
-#ifndef CONFIG_PPC64
-#include <asm/atomic.h>
-#include <linux/bitops.h>
-
-/*
- * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
- * (virtual segment identifiers) for each context. Although the
- * hardware supports 24-bit VSIDs, and thus >1 million contexts,
- * we only use 32,768 of them. That is ample, since there can be
- * at most around 30,000 tasks in the system anyway, and it means
- * that we can use a bitmap to indicate which contexts are in use.
- * Using a bitmap means that we entirely avoid all of the problems
- * that we used to have when the context number overflowed,
- * particularly on SMP systems.
- * -- paulus.
- */
-
-/*
- * This function defines the mapping from contexts to VSIDs (virtual
- * segment IDs). We use a skew on both the context and the high 4 bits
- * of the 32-bit virtual address (the "effective segment ID") in order
- * to spread out the entries in the MMU hash table. Note, if this
- * function is changed then arch/ppc/mm/hashtable.S will have to be
- * changed to correspond.
- */
-#define CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
- & 0xffffff)
-
-/*
- The MPC8xx has only 16 contexts. We rotate through them on each
- task switch. A better way would be to keep track of tasks that
- own contexts, and implement an LRU usage. That way very active
- tasks don't always have to pay the TLB reload overhead. The
- kernel pages are mapped shared, so the kernel can run on behalf
- of any task that makes a kernel entry. Shared does not mean they
- are not protected, just that the ASID comparison is not performed.
- -- Dan
-
- The IBM4xx has 256 contexts, so we can just rotate through these
- as a way of "switching" contexts. If the TID of the TLB is zero,
- the PID/TID comparison is disabled, so we can use a TID of zero
- to represent all kernel pages as shared among all contexts.
- -- Dan
- */
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-#ifdef CONFIG_8xx
-#define NO_CONTEXT 16
-#define LAST_CONTEXT 15
-#define FIRST_CONTEXT 0
-
-#elif defined(CONFIG_4xx)
-#define NO_CONTEXT 256
-#define LAST_CONTEXT 255
-#define FIRST_CONTEXT 1
-
-#elif defined(CONFIG_E200) || defined(CONFIG_E500)
-#define NO_CONTEXT 256
-#define LAST_CONTEXT 255
-#define FIRST_CONTEXT 1
-
-#else
-
-/* PPC 6xx, 7xx CPUs */
-#define NO_CONTEXT ((unsigned long) -1)
-#define LAST_CONTEXT 32767
-#define FIRST_CONTEXT 1
-#endif
-
-/*
- * Set the current MMU context.
- * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
- * loading up the segment registers for the user part of the address space.
- *
- * Since the PGD is immediately available, it is much faster to simply
- * pass this along as a second parameter, which is required for 8xx and
- * can be used for debugging on all processors (if you happen to have
- * an Abatron).
- */
-extern void set_context(unsigned long contextid, pgd_t *pgd);
-
-/*
- * Bitmap of contexts in use.
- * The size of this bitmap is LAST_CONTEXT + 1 bits.
- */
-extern unsigned long context_map[];
-
-/*
- * This caches the next context number that we expect to be free.
- * Its use is an optimization only, we can't rely on this context
- * number to be free, but it usually will be.
- */
-extern unsigned long next_mmu_context;
-
-/*
- * If we don't have sufficient contexts to give one to every task
- * that could be in the system, we need to be able to steal contexts.
- * These variables support that.
- */
-#if LAST_CONTEXT < 30000
-#define FEW_CONTEXTS 1
-extern atomic_t nr_free_contexts;
-extern struct mm_struct *context_mm[LAST_CONTEXT+1];
-extern void steal_context(void);
-#endif
-
-/*
- * Get a new mmu context for the address space described by `mm'.
- */
-static inline void get_mmu_context(struct mm_struct *mm)
-{
- unsigned long ctx;
-
- if (mm->context.id != NO_CONTEXT)
- return;
-#ifdef FEW_CONTEXTS
- while (atomic_dec_if_positive(&nr_free_contexts) < 0)
- steal_context();
-#endif
- ctx = next_mmu_context;
- while (test_and_set_bit(ctx, context_map)) {
- ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
- if (ctx > LAST_CONTEXT)
- ctx = 0;
- }
- next_mmu_context = (ctx + 1) & LAST_CONTEXT;
- mm->context.id = ctx;
-#ifdef FEW_CONTEXTS
- context_mm[ctx] = mm;
-#endif
-}
-
-/*
- * Set up the context for a new address space.
- */
-static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
-{
- mm->context.id = NO_CONTEXT;
- return 0;
-}
-
-/*
- * We're finished using the context for an address space.
- */
-static inline void destroy_context(struct mm_struct *mm)
-{
- preempt_disable();
- if (mm->context.id != NO_CONTEXT) {
- clear_bit(mm->context.id, context_map);
- mm->context.id = NO_CONTEXT;
-#ifdef FEW_CONTEXTS
- atomic_inc(&nr_free_contexts);
-#endif
- }
- preempt_enable();
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
-{
-#ifdef CONFIG_ALTIVEC
- if (cpu_has_feature(CPU_FTR_ALTIVEC))
- asm volatile ("dssall;\n"
-#ifndef CONFIG_POWER4
- "sync;\n" /* G4 needs a sync here, G5 apparently not */
-#endif
- : : );
-#endif /* CONFIG_ALTIVEC */
-
- tsk->thread.pgdir = next->pgd;
-
- /* No need to flush userspace segments if the mm doesnt change */
- if (prev == next)
- return;
-
- /* Setup new userspace context */
- get_mmu_context(next);
- set_context(next->context.id, next->pgd);
-}
-
-#define deactivate_mm(tsk,mm) do { } while (0)
+#include <asm/cputhreads.h>
/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
+ * Most of the context management is out of line
*/
-#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, current)
-
extern void mmu_context_init(void);
-
-
-#else
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-
-/*
- * Copyright (C) 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-static inline void enter_lazy_tlb(struct mm_struct *mm,
- struct task_struct *tsk)
-{
-}
-
-/*
- * The proto-VSID space has 2^35 - 1 segments available for user mappings.
- * Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
- * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
- */
-#define NO_CONTEXT 0
-#define MAX_CONTEXT ((1UL << 19) - 1)
-
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
+extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
+extern void set_context(unsigned long id, pgd_t *pgd);
/*
* switch_mm is the entry point called from the architecture independent
@@ -241,22 +30,39 @@ extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
- if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
- cpu_set(smp_processor_id(), next->cpu_vm_mask);
+	/* Mark this context as having been used on the new CPU */
+ cpu_set(smp_processor_id(), next->cpu_vm_mask);
+
+ /* 32-bit keeps track of the current PGDIR in the thread struct */
+#ifdef CONFIG_PPC32
+ tsk->thread.pgdir = next->pgd;
+#endif /* CONFIG_PPC32 */
- /* No need to flush userspace segments if the mm doesnt change */
+ /* Nothing else to do if we aren't actually switching */
if (prev == next)
return;
+ /* We must stop all altivec streams before changing the HW
+ * context
+ */
#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC))
asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */
+ /* The actual HW switching method differs between the various
+ * sub architectures.
+ */
+#ifdef CONFIG_PPC_STD_MMU_64
if (cpu_has_feature(CPU_FTR_SLB))
switch_slb(tsk, next);
else
switch_stab(tsk, next);
+#else
+ /* Out of line for now */
+ switch_mmu_context(prev, next);
+#endif
+
}
#define deactivate_mm(tsk,mm) do { } while (0)
@@ -274,6 +80,11 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
local_irq_restore(flags);
}
-#endif /* CONFIG_PPC64 */
+/* We don't currently use enter_lazy_tlb() for anything */
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+ struct task_struct *tsk)
+{
+}
+
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index 81ef10b6b672..81a23932a160 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -239,6 +239,25 @@ struct mpc52xx_cdm {
u16 mclken_div_psc6; /* CDM + 0x36 reg13 byte2,3 */
};
+/* Interrupt controller Register set */
+struct mpc52xx_intr {
+ u32 per_mask; /* INTR + 0x00 */
+ u32 per_pri1; /* INTR + 0x04 */
+ u32 per_pri2; /* INTR + 0x08 */
+ u32 per_pri3; /* INTR + 0x0c */
+ u32 ctrl; /* INTR + 0x10 */
+ u32 main_mask; /* INTR + 0x14 */
+ u32 main_pri1; /* INTR + 0x18 */
+ u32 main_pri2; /* INTR + 0x1c */
+ u32 reserved1; /* INTR + 0x20 */
+ u32 enc_status; /* INTR + 0x24 */
+ u32 crit_status; /* INTR + 0x28 */
+ u32 main_status; /* INTR + 0x2c */
+ u32 per_status; /* INTR + 0x30 */
+ u32 reserved2; /* INTR + 0x34 */
+ u32 per_error; /* INTR + 0x38 */
+};
+
#endif /* __ASSEMBLY__ */
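A hedged sketch of how the new register block is intended to be used: like the other MPC52xx register structs in this header, it is laid over an ioremap'd region and read with the big-endian accessors. of_iomap() and the device-tree node argument are assumptions about the caller, not part of this patch.

static u32 example_read_intr_main_status(struct device_node *np)
{
	struct mpc52xx_intr __iomem *intr;
	u32 status;

	intr = of_iomap(np, 0);
	if (!intr)
		return 0;

	status = in_be32(&intr->main_status);	/* INTR + 0x2c */
	iounmap(intr);
	return status;
}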
diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h
index 8917ed630565..a218da6bec7c 100644
--- a/arch/powerpc/include/asm/mpc52xx_psc.h
+++ b/arch/powerpc/include/asm/mpc52xx_psc.h
@@ -68,12 +68,20 @@
#define MPC52xx_PSC_IMR_ORERR 0x1000
#define MPC52xx_PSC_IMR_IPC 0x8000
-/* PSC input port change bit */
+/* PSC input port change bits */
#define MPC52xx_PSC_CTS 0x01
#define MPC52xx_PSC_DCD 0x02
#define MPC52xx_PSC_D_CTS 0x10
#define MPC52xx_PSC_D_DCD 0x20
+/* PSC acr bits */
+#define MPC52xx_PSC_IEC_CTS 0x01
+#define MPC52xx_PSC_IEC_DCD 0x02
+
+/* PSC output port bits */
+#define MPC52xx_PSC_OP_RTS 0x01
+#define MPC52xx_PSC_OP_RES 0x02
+
/* PSC mode fields */
#define MPC52xx_PSC_MODE_5_BITS 0x00
#define MPC52xx_PSC_MODE_6_BITS 0x01
@@ -91,6 +99,7 @@
#define MPC52xx_PSC_MODE_ONE_STOP_5_BITS 0x00
#define MPC52xx_PSC_MODE_ONE_STOP 0x07
#define MPC52xx_PSC_MODE_TWO_STOP 0x0f
+#define MPC52xx_PSC_MODE_TXCTS 0x10
#define MPC52xx_PSC_RFNUM_MASK 0x01ff
diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h
index 458c1f7fbc18..dabc01c727b8 100644
--- a/arch/powerpc/include/asm/mutex.h
+++ b/arch/powerpc/include/asm/mutex.h
@@ -1,9 +1,134 @@
/*
- * Pull in the generic implementation for the mutex fastpath.
+ * Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm
+ */
+#ifndef _ASM_POWERPC_MUTEX_H
+#define _ASM_POWERPC_MUTEX_H
+
+static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new)
+{
+ int t;
+
+ __asm__ __volatile__ (
+"1: lwarx %0,0,%1 # mutex trylock\n\
+ cmpw 0,%0,%2\n\
+ bne- 2f\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %3,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:"
+ : "=&r" (t)
+ : "r" (&v->counter), "r" (old), "r" (new)
+ : "cc", "memory");
+
+ return t;
+}
+
+static inline int __mutex_dec_return_lock(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%1 # mutex lock\n\
+ addic %0,%0,-1\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+static inline int __mutex_inc_return_unlock(atomic_t *v)
+{
+ int t;
+
+ __asm__ __volatile__(
+ LWSYNC_ON_SMP
+"1: lwarx %0,0,%1 # mutex unlock\n\
+ addic %0,%0,1\n"
+ PPC405_ERR77(0,%1)
+" stwcx. %0,0,%1 \n\
+ bne- 1b"
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+ if (unlikely(__mutex_dec_return_lock(count) < 0))
+ fail_fn(count);
+}
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (unlikely(__mutex_dec_return_lock(count) < 0))
+ return fail_fn(count);
+ return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ */
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+ if (unlikely(__mutex_inc_return_unlock(count) <= 0))
+ fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
*
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
+ * Change the count from 1 to 0, and return 1 (success), or if the count
+ * was not 1, then return 0 (failure).
*/
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (likely(__mutex_cmpxchg_lock(count, 1, 0) == 1))
+ return 1;
+ return 0;
+}
-#include <asm-generic/mutex-dec.h>
+#endif
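For reference, a hedged restatement of the count protocol the ll/sc fastpaths above implement; it matches the asm-generic mutex-dec scheme this header previously pulled in, with the acquire/release barriers (ISYNC_ON_SMP / LWSYNC_ON_SMP) folded into the loops instead of using full-barrier atomics.

/*
 *   lock:    if (atomic_dec_return(count) < 0)  fail_fn(count);
 *   unlock:  if (atomic_inc_return(count) <= 0) fail_fn(count);
 *   trylock: success iff atomic_cmpxchg(count, 1, 0) == 1
 */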
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index c0b8d4a29a91..197d569f5bd3 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -19,12 +19,15 @@
#include <asm/kdump.h>
/*
- * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
+ * On regular PPC32 the page size is 4K (but we support 4K/16K/64K pages
+ * on PPC44x). For PPC64 we support either 4K or 64K software
* page size. When using 64K pages however, whether we are really supporting
* 64K pages in HW or not is irrelevant to those definitions.
*/
-#ifdef CONFIG_PPC_64K_PAGES
+#if defined(CONFIG_PPC_64K_PAGES)
#define PAGE_SHIFT 16
+#elif defined(CONFIG_PPC_16K_PAGES)
+#define PAGE_SHIFT 14
#else
#define PAGE_SHIFT 12
#endif
@@ -151,7 +154,7 @@ typedef struct { pte_basic_t pte; } pte_t;
/* 64k pages additionally define a bigger "real PTE" type that gathers
* the "second half" part of the PTE for pseudo 64k pages
*/
-#ifdef CONFIG_PPC_64K_PAGES
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
@@ -191,10 +194,10 @@ typedef pte_basic_t pte_t;
#define pte_val(x) (x)
#define __pte(x) (x)
-#ifdef CONFIG_PPC_64K_PAGES
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
-typedef unsigned long real_pte_t;
+typedef pte_t real_pte_t;
#endif
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index d77072a32cc6..1458d9500381 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -19,6 +19,8 @@
#define PTE_FLAGS_OFFSET 0
#endif
+#define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) /* full page */
+
#ifndef __ASSEMBLY__
/*
* The basic type of a PTE - 64 bits for those CPUs with > 32 bit
@@ -26,10 +28,8 @@
*/
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
-#define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */
#else
typedef unsigned long pte_basic_t;
-#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
#endif
struct page;
@@ -39,6 +39,9 @@ extern void copy_page(void *to, void *from);
#include <asm-generic/page.h>
+#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
+#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PAGE_32_H */
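Worked numbers for the unified PTE_SHIFT above, reproducing the two per-config values the patch removes; the 16K/64K figures follow from the same formula.

/*
 *   32-bit PTEs: PTE_T_LOG2 = 2, PTE_SHIFT = PAGE_SHIFT - 2
 *                -> 1024 ptes per 4K page (the old hard-coded value)
 *   64-bit PTEs: PTE_T_LOG2 = 3, PTE_SHIFT = PAGE_SHIFT - 3
 *                ->  512 ptes per 4K page (the old hard-coded value)
 *   With 16K/64K pages the same formula scales automatically, which is why
 *   the two per-config definitions could be dropped.
 */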
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 9047af7baa69..84007afabdb5 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -13,7 +13,6 @@
struct device_node;
-extern unsigned int ppc_pci_flags;
enum {
/* Force re-assigning all resources (ignore firmware
* setup completely)
@@ -36,6 +35,31 @@ enum {
/* ... except for domain 0 */
PPC_PCI_COMPAT_DOMAIN_0 = 0x00000020,
};
+#ifdef CONFIG_PCI
+extern unsigned int ppc_pci_flags;
+
+static inline void ppc_pci_set_flags(int flags)
+{
+ ppc_pci_flags = flags;
+}
+
+static inline void ppc_pci_add_flags(int flags)
+{
+ ppc_pci_flags |= flags;
+}
+
+static inline int ppc_pci_has_flag(int flag)
+{
+ return (ppc_pci_flags & flag);
+}
+#else
+static inline void ppc_pci_set_flags(int flags) { }
+static inline void ppc_pci_add_flags(int flags) { }
+static inline int ppc_pci_has_flag(int flag)
+{
+ return 0;
+}
+#endif
/*
@@ -241,9 +265,6 @@ extern void pcibios_remove_pci_devices(struct pci_bus *bus);
/** Discover new pci devices under this bus, and add them */
extern void pcibios_add_pci_devices(struct pci_bus *bus);
-extern void pcibios_fixup_new_pci_devices(struct pci_bus *bus);
-
-extern int pcibios_remove_root_bus(struct pci_controller *phb);
static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
{
@@ -290,6 +311,7 @@ extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
/* Allocate & free a PCI host bridge structure */
extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
extern void pcibios_free_controller(struct pci_controller *phb);
+extern void pcibios_setup_phb_resources(struct pci_controller *hose);
#ifdef CONFIG_PCI
extern unsigned long pci_address_to_pio(phys_addr_t address);
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 57a2a494886b..3548159a1beb 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -38,8 +38,8 @@ struct pci_dev;
* Set this to 1 if you want the kernel to re-assign all PCI
* bus numbers (don't do that on ppc64 yet !)
*/
-#define pcibios_assign_all_busses() (ppc_pci_flags & \
- PPC_PCI_REASSIGN_ALL_BUS)
+#define pcibios_assign_all_busses() \
+ (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS))
#define pcibios_scan_all_fns(a, b) 0
static inline void pcibios_set_master(struct pci_dev *dev)
@@ -204,15 +204,14 @@ static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
return root;
}
-extern void pcibios_setup_new_device(struct pci_dev *dev);
-
extern void pcibios_claim_one_bus(struct pci_bus *b);
-extern void pcibios_allocate_bus_resources(struct pci_bus *bus);
+extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
extern void pcibios_resource_survey(void);
extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
+extern int remove_phb_dynamic(struct pci_controller *phb);
extern struct pci_dev *of_create_pci_dev(struct device_node *node,
struct pci_bus *bus, int devfn);
@@ -221,6 +220,7 @@ extern void of_scan_pci_bridge(struct device_node *node,
struct pci_dev *dev);
extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
+extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
extern int pci_read_irq_line(struct pci_dev *dev);
@@ -235,9 +235,8 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
resource_size_t *start, resource_size_t *end);
-extern void pcibios_do_bus_setup(struct pci_bus *bus);
-extern void pcibios_fixup_of_probed_bus(struct pci_bus *bus);
-
+extern void pcibios_setup_bus_devices(struct pci_bus *bus);
+extern void pcibios_setup_bus_self(struct pci_bus *bus);
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_PCI_H */
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 58c07147b3ea..0815eb40acae 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -3,6 +3,8 @@
#include <linux/threads.h>
+#define PTE_NONCACHE_NUM 0 /* dummy for now to share code w/ppc64 */
+
extern void __bad_pte(pmd_t *pmd);
extern pgd_t *pgd_alloc(struct mm_struct *mm);
@@ -33,10 +35,13 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
-extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
-extern void pte_free(struct mm_struct *mm, pgtable_t pte);
-#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
+static inline void pgtable_free(pgtable_free_t pgf)
+{
+ void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
+
+ free_page((unsigned long)p);
+}
#define check_pgt_cache() do { } while (0)
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 812a1d8f35cb..afda2bdd860f 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -7,7 +7,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
@@ -108,31 +107,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
return page;
}
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
- free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
- pgtable_page_dtor(ptepage);
- __free_page(ptepage);
-}
-
-#define PGF_CACHENUM_MASK 0x7
-
-typedef struct pgtable_free {
- unsigned long val;
-} pgtable_free_t;
-
-static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
- unsigned long mask)
-{
- BUG_ON(cachenum > PGF_CACHENUM_MASK);
-
- return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
-}
-
static inline void pgtable_free(pgtable_free_t pgf)
{
void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
@@ -144,14 +118,6 @@ static inline void pgtable_free(pgtable_free_t pgf)
kmem_cache_free(pgtable_cache[cachenum], p);
}
-extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
-
-#define __pte_free_tlb(tlb,ptepage) \
-do { \
- pgtable_page_dtor(ptepage); \
- pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
- PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
-} while (0)
#define __pmd_free_tlb(tlb, pmd) \
pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index b4505ed0f0f2..5d8480265a77 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -2,11 +2,52 @@
#define _ASM_POWERPC_PGALLOC_H
#ifdef __KERNEL__
+#include <linux/mm.h>
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
+{
+ pgtable_page_dtor(ptepage);
+ __free_page(ptepage);
+}
+
+typedef struct pgtable_free {
+ unsigned long val;
+} pgtable_free_t;
+
+#define PGF_CACHENUM_MASK 0x7
+
+static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
+ unsigned long mask)
+{
+ BUG_ON(cachenum > PGF_CACHENUM_MASK);
+
+ return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
+}
+
#ifdef CONFIG_PPC64
#include <asm/pgalloc-64.h>
#else
#include <asm/pgalloc-32.h>
#endif
+extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+
+#ifdef CONFIG_SMP
+#define __pte_free_tlb(tlb,ptepage) \
+do { \
+ pgtable_page_dtor(ptepage); \
+ pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
+ PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
+} while (0)
+#else
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
+#endif
+
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGALLOC_H */
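
pgtable_free_cache() works because the tables it packs are at least 8-byte aligned, so the low three bits of the pointer (PGF_CACHENUM_MASK) are free to carry the cache index that pgtable_free() later uses to route the page back to free_page() or the right kmem_cache. A user-space sketch of the same pack/unpack, assuming nothing more than a page-aligned allocation:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define PGF_CACHENUM_MASK 0x7UL

typedef struct pgtable_free { unsigned long val; } pgtable_free_t;

/* Pack a small cache index into the low bits of an aligned pointer. */
static pgtable_free_t pgtable_free_cache(void *p, int cachenum, unsigned long mask)
{
	assert(cachenum <= (int)PGF_CACHENUM_MASK);	/* the kernel BUG_ON()s here */
	return (pgtable_free_t){ .val = ((unsigned long)p & ~mask) | cachenum };
}

int main(void)
{
	void *table = aligned_alloc(4096, 4096);	/* page tables are page aligned */
	if (!table)
		return 1;

	pgtable_free_t pgf = pgtable_free_cache(table, 3, PGF_CACHENUM_MASK);

	void *p       = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
	long cachenum = pgf.val & PGF_CACHENUM_MASK;

	printf("pointer intact: %d, cachenum: %ld\n", p == table, cachenum);
	free(table);
	return 0;
}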
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 6ab7c67cb5ab..f69a4d977729 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -228,9 +228,10 @@ extern int icache_44x_need_flush;
* - FILE *must* be in the bottom three bits because swap cache
* entries use the top 29 bits for TLB2.
*
- * - CACHE COHERENT bit (M) has no effect on PPC440 core, because it
- * doesn't support SMP. So we can use this as software bit, like
- * DIRTY.
+ * - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
+ * because they don't support SMP. However, some later 460 variants
+ * have -some- form of SMP support and so I keep the bit there for
+ * future use.
*
* With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
* for memory protection related functions (see PTE structure in
@@ -436,20 +437,23 @@ extern int icache_44x_need_flush;
_PAGE_USER | _PAGE_ACCESSED | \
_PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
_PAGE_EXEC | _PAGE_HWEXEC)
+
/*
- * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
- * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
- * to have it in the Linux PTE, and in fact the bit could be reused for
- * another purpose. -- paulus.
+ * We define 2 sets of base prot bits, one for basic pages (i.e.,
+ * cacheable kernel and user pages) and one for non-cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or when
+ * the processor might need it for DMA coherency.
*/
-
-#ifdef CONFIG_44x
-#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED)
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
#else
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
#endif
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)
+
#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
#define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
+#define _PAGE_KERNEL_NC (_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE)
#ifdef CONFIG_PPC_STD_MMU
/* On standard PPC MMU, no user access implies kernel read/write access,
@@ -459,7 +463,7 @@ extern int icache_44x_need_flush;
#define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED)
#endif
-#define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define _PAGE_IO (_PAGE_KERNEL_NC | _PAGE_GUARDED)
#define _PAGE_RAM (_PAGE_KERNEL | _PAGE_HWEXEC)
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
@@ -552,9 +556,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
-static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
-static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
-
static inline pte_t pte_wrprotect(pte_t pte) {
pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
@@ -693,10 +694,11 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
#endif
}
+
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
-#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) && defined(CONFIG_DEBUG_VM)
WARN_ON(pte_present(*ptep));
#endif
__set_pte_at(mm, addr, ptep, pte);
@@ -760,16 +762,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
__changed; \
})
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
-
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 4c0a8c62859d..b0f18be81d9f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -100,7 +100,7 @@
#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY)
-/* __pgprot defined in arch/powerpc/incliude/asm/page.h */
+/* __pgprot defined in arch/powerpc/include/asm/page.h */
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
@@ -245,9 +245,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
-static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
-static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
-
static inline pte_t pte_wrprotect(pte_t pte) {
pte_val(pte) &= ~(_PAGE_RW); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
@@ -405,16 +402,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
__changed; \
})
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
-
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
- unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index dbb8ca172e44..07f55e601696 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -16,6 +16,32 @@ struct mm_struct;
#endif
#ifndef __ASSEMBLY__
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+ _PAGE_WRITETHRU)
+
+#define pgprot_noncached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+ _PAGE_NO_CACHE | _PAGE_GUARDED))
+
+#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+ _PAGE_NO_CACHE))
+
+#define pgprot_cached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+ _PAGE_COHERENT))
+
+#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+ _PAGE_COHERENT | _PAGE_WRITETHRU))
+
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
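
The point of _PAGE_CACHE_CTL is that every pgprot_* helper first strips all cache-attribute bits and only then ORs in the ones it wants, so converting a protection value between non-cached, cacheable and write-through can never leave stale attribute bits behind. A small sketch of that masking pattern with placeholder bit values (the real powerpc bits differ between MMU families):

#include <stdio.h>

/* Placeholder bit values; only the masking pattern matters here. */
#define _PAGE_COHERENT  0x1UL
#define _PAGE_GUARDED   0x2UL
#define _PAGE_NO_CACHE  0x4UL
#define _PAGE_WRITETHRU 0x8UL

#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | _PAGE_WRITETHRU)

/* Clear every cache-control bit first, then set the requested ones. */
#define pgprot_noncached(prot)	(((prot) & ~_PAGE_CACHE_CTL) | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define pgprot_cached(prot)	(((prot) & ~_PAGE_CACHE_CTL) | _PAGE_COHERENT)

int main(void)
{
	unsigned long prot = _PAGE_COHERENT | _PAGE_WRITETHRU;

	prot = pgprot_noncached(prot);
	printf("noncached: %#lx\n", prot);	/* 0x6: NO_CACHE|GUARDED, old bits gone */

	prot = pgprot_cached(prot);
	printf("cached:    %#lx\n", prot);	/* 0x1: COHERENT only */
	return 0;
}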
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index c4a029ccb4d3..1a0d628eb114 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -425,14 +425,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#define fromreal(rd) tovirt(rd,rd)
#define tophys(rd,rs) \
-0: addis rd,rs,-KERNELBASE@h; \
+0: addis rd,rs,-PAGE_OFFSET@h; \
.section ".vtop_fixup","aw"; \
.align 1; \
.long 0b; \
.previous
#define tovirt(rd,rs) \
-0: addis rd,rs,KERNELBASE@h; \
+0: addis rd,rs,PAGE_OFFSET@h; \
.section ".ptov_fixup","aw"; \
.align 1; \
.long 0b; \
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 101ed87f7d84..d3466490104a 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -69,8 +69,6 @@ extern int _prep_type;
#ifdef __KERNEL__
-extern int have_of;
-
struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);
@@ -207,6 +205,11 @@ struct thread_struct {
#define INIT_SP_LIMIT \
(_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
+#ifdef CONFIG_SPE
+#define SPEFSCR_INIT .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
+#else
+#define SPEFSCR_INIT
+#endif
#ifdef CONFIG_PPC32
#define INIT_THREAD { \
@@ -215,6 +218,7 @@ struct thread_struct {
.fs = KERNEL_DS, \
.pgdir = swapper_pg_dir, \
.fpexc_mode = MSR_FE0 | MSR_FE1, \
+ SPEFSCR_INIT \
}
#else
#define INIT_THREAD { \
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index eb3bd2e1c7f6..6ff04185d2aa 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -253,6 +253,9 @@ extern void kdump_move_device_tree(void);
/* CPU OF node matching */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
+/* cache lookup */
+struct device_node *of_find_next_cache_node(struct device_node *np);
+
/* Get the MAC address */
extern const void *of_get_mac_address(struct device_node *np);
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index f9e34c493cbb..cff30c0ef1ff 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -305,30 +305,34 @@ static inline const char* ps3_result(int result)
/* system bus routines */
enum ps3_match_id {
- PS3_MATCH_ID_EHCI = 1,
- PS3_MATCH_ID_OHCI = 2,
- PS3_MATCH_ID_GELIC = 3,
- PS3_MATCH_ID_AV_SETTINGS = 4,
- PS3_MATCH_ID_SYSTEM_MANAGER = 5,
- PS3_MATCH_ID_STOR_DISK = 6,
- PS3_MATCH_ID_STOR_ROM = 7,
- PS3_MATCH_ID_STOR_FLASH = 8,
- PS3_MATCH_ID_SOUND = 9,
- PS3_MATCH_ID_GRAPHICS = 10,
- PS3_MATCH_ID_LPM = 11,
+ PS3_MATCH_ID_EHCI = 1,
+ PS3_MATCH_ID_OHCI = 2,
+ PS3_MATCH_ID_GELIC = 3,
+ PS3_MATCH_ID_AV_SETTINGS = 4,
+ PS3_MATCH_ID_SYSTEM_MANAGER = 5,
+ PS3_MATCH_ID_STOR_DISK = 6,
+ PS3_MATCH_ID_STOR_ROM = 7,
+ PS3_MATCH_ID_STOR_FLASH = 8,
+ PS3_MATCH_ID_SOUND = 9,
+ PS3_MATCH_ID_GPU = 10,
+ PS3_MATCH_ID_LPM = 11,
};
-#define PS3_MODULE_ALIAS_EHCI "ps3:1"
-#define PS3_MODULE_ALIAS_OHCI "ps3:2"
-#define PS3_MODULE_ALIAS_GELIC "ps3:3"
-#define PS3_MODULE_ALIAS_AV_SETTINGS "ps3:4"
-#define PS3_MODULE_ALIAS_SYSTEM_MANAGER "ps3:5"
-#define PS3_MODULE_ALIAS_STOR_DISK "ps3:6"
-#define PS3_MODULE_ALIAS_STOR_ROM "ps3:7"
-#define PS3_MODULE_ALIAS_STOR_FLASH "ps3:8"
-#define PS3_MODULE_ALIAS_SOUND "ps3:9"
-#define PS3_MODULE_ALIAS_GRAPHICS "ps3:10"
-#define PS3_MODULE_ALIAS_LPM "ps3:11"
+enum ps3_match_sub_id {
+ PS3_MATCH_SUB_ID_GPU_FB = 1,
+};
+
+#define PS3_MODULE_ALIAS_EHCI "ps3:1:0"
+#define PS3_MODULE_ALIAS_OHCI "ps3:2:0"
+#define PS3_MODULE_ALIAS_GELIC "ps3:3:0"
+#define PS3_MODULE_ALIAS_AV_SETTINGS "ps3:4:0"
+#define PS3_MODULE_ALIAS_SYSTEM_MANAGER "ps3:5:0"
+#define PS3_MODULE_ALIAS_STOR_DISK "ps3:6:0"
+#define PS3_MODULE_ALIAS_STOR_ROM "ps3:7:0"
+#define PS3_MODULE_ALIAS_STOR_FLASH "ps3:8:0"
+#define PS3_MODULE_ALIAS_SOUND "ps3:9:0"
+#define PS3_MODULE_ALIAS_GPU_FB "ps3:10:1"
+#define PS3_MODULE_ALIAS_LPM "ps3:11:0"
enum ps3_system_bus_device_type {
PS3_DEVICE_TYPE_IOC0 = 1,
@@ -337,11 +341,6 @@ enum ps3_system_bus_device_type {
PS3_DEVICE_TYPE_LPM,
};
-enum ps3_match_sub_id {
- /* for PS3_MATCH_ID_GRAPHICS */
- PS3_MATCH_SUB_ID_FB = 1,
-};
-
/**
* struct ps3_system_bus_device - a device on the system bus
*/
@@ -516,4 +515,7 @@ void ps3_sync_irq(int node);
u32 ps3_get_hw_thread_id(int cpu);
u64 ps3_get_spe_id(void *arg);
+/* mutex synchronizing GPU accesses and video mode changes */
+extern struct mutex ps3_gpu_mutex;
+
#endif
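
The module alias strings gain a second field, so a driver now matches on both the device id and its sub id ("ps3:<match_id>:<match_sub_id>", with what looks like 0 standing for "no sub id"). A trivial sketch of composing such an alias; how the PS3 system bus actually emits it is outside this hunk:

#include <stdio.h>

#define DEMO_MATCH_ID_GPU	 10	/* cf. PS3_MATCH_ID_GPU */
#define DEMO_MATCH_SUB_ID_GPU_FB  1	/* cf. PS3_MATCH_SUB_ID_GPU_FB */

int main(void)
{
	char alias[32];

	/* "ps3:<match_id>:<match_sub_id>", cf. PS3_MODULE_ALIAS_GPU_FB == "ps3:10:1" */
	snprintf(alias, sizeof(alias), "ps3:%d:%d",
		 DEMO_MATCH_ID_GPU, DEMO_MATCH_SUB_ID_GPU_FB);
	printf("%s\n", alias);
	return 0;
}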
diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h
index 5aa22cffdbd6..cd24ac16660a 100644
--- a/arch/powerpc/include/asm/ps3av.h
+++ b/arch/powerpc/include/asm/ps3av.h
@@ -740,8 +740,4 @@ extern int ps3av_audio_mute(int);
extern int ps3av_audio_mute_analog(int);
extern int ps3av_dev_open(void);
extern int ps3av_dev_close(void);
-extern void ps3av_register_flip_ctl(void (*flip_ctl)(int on, void *data),
- void *flip_data);
-extern void ps3av_flip_ctl(int on);
-
#endif /* _ASM_POWERPC_PS3AV_H_ */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c6d1ab650778..f484a343efba 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -783,6 +783,10 @@ extern void scom970_write(unsigned int address, unsigned long value);
#define __get_SP() ({unsigned long sp; \
asm volatile("mr %0,1": "=r" (sp)); sp;})
+struct pt_regs;
+
+extern void ppc_save_regs(struct pt_regs *regs);
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_REG_H */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 8eaa7b28d9d0..e0175beb4462 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -168,6 +168,7 @@ extern void rtas_os_term(char *str);
extern int rtas_get_sensor(int sensor, int index, int *state);
extern int rtas_get_power_level(int powerdomain, int *level);
extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
+extern bool rtas_indicator_present(int token, int *maxindex);
extern int rtas_set_indicator(int indicator, int index, int new_value);
extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
extern void rtas_progress(char *s, unsigned short hex);
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
index ced34f1dc8f8..3d9f831c3c55 100644
--- a/arch/powerpc/include/asm/sfp-machine.h
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -82,7 +82,7 @@
#define _FP_MUL_MEAT_S(R,X,Y) _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_D(R,X,Y) _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
-#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv(S,R,X,Y)
+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y)
#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
/* These macros define what NaN looks like. They're supposed to expand to
@@ -97,6 +97,20 @@
#define _FP_KEEPNANFRACP 1
+#ifdef FP_EX_BOOKE_E500_SPE
+#define FP_EX_INEXACT (1 << 21)
+#define FP_EX_INVALID (1 << 20)
+#define FP_EX_DIVZERO (1 << 19)
+#define FP_EX_UNDERFLOW (1 << 18)
+#define FP_EX_OVERFLOW (1 << 17)
+#define FP_INHIBIT_RESULTS 0
+
+#define __FPU_FPSCR (current->thread.spefscr)
+#define __FPU_ENABLED_EXC \
+({ \
+ (__FPU_FPSCR >> 2) & 0x1f; \
+})
+#else
/* Exception flags. We use the bit positions of the appropriate bits
in the FPSCR, which also correspond to the FE_* bits. This makes
everything easier ;-). */
@@ -111,22 +125,6 @@
#define FP_EX_DIVZERO (1 << (31 - 5))
#define FP_EX_INEXACT (1 << (31 - 6))
-/* This macro appears to be called when both X and Y are NaNs, and
- * has to choose one and copy it to R. i386 goes for the larger of the
- * two, sparc64 just picks Y. I don't understand this at all so I'll
- * go with sparc64 because it's shorter :-> -- PMM
- */
-#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
- do { \
- R##_s = Y##_s; \
- _FP_FRAC_COPY_##wc(R,Y); \
- R##_c = FP_CLS_NAN; \
- } while (0)
-
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-
#define __FPU_FPSCR (current->thread.fpscr.val)
/* We only actually write to the destination register
@@ -137,6 +135,32 @@
(__FPU_FPSCR >> 3) & 0x1f; \
})
+#endif
+
+/*
+ * If one NaN is signaling and the other is not,
+ * we choose that one, otherwise we choose Y.
+ */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
+ do { \
+ if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs) \
+ && !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
+ { \
+ R##_s = X##_s; \
+ _FP_FRAC_COPY_##wc(R,X); \
+ } \
+ else \
+ { \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R,Y); \
+ } \
+ R##_c = FP_CLS_NAN; \
+ } while (0)
+
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
#define __FPU_TRAP_P(bits) \
((__FPU_ENABLED_EXC & (bits)) != 0)
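
The replacement _FP_CHOOSENAN propagates a signaling NaN when exactly one operand is signaling (quiet bit clear) and otherwise falls back to Y; that is all the branch on _FP_QNANBIT tests. The same decision in a tiny user-space sketch, with a made-up quiet-bit position:

#include <stdio.h>

#define QNANBIT 0x80000000u	/* placeholder quiet-NaN fraction bit */

/* Mirrors the macro: Y quiet and X signaling -> take X, otherwise take Y. */
static unsigned int choose_nan(unsigned int x_frac, unsigned int y_frac)
{
	if ((y_frac & QNANBIT) && !(x_frac & QNANBIT))
		return x_frac;
	return y_frac;
}

int main(void)
{
	unsigned int quiet = QNANBIT | 0x1, snan = 0x2;

	printf("%d\n", choose_nan(snan, quiet) == snan);	/* 1: the signaling NaN wins */
	printf("%d\n", choose_nan(quiet, snan) == snan);	/* 1: the signaling NaN wins */
	printf("%d\n", choose_nan(quiet, QNANBIT | 0x4) == (QNANBIT | 0x4));	/* 1: both quiet -> Y */
	return 0;
}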
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 1866cec4f967..c25f73d1d842 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -81,6 +81,13 @@ extern int cpu_to_core_id(int cpu);
#define PPC_MSG_CALL_FUNC_SINGLE 2
#define PPC_MSG_DEBUGGER_BREAK 3
+/*
+ * irq controllers that have dedicated ipis per message and don't
+ * need additional code in the action handler may use this
+ */
+extern int smp_request_message_ipi(int virq, int message);
+extern const char *smp_ipi_name[];
+
void smp_init_iSeries(void);
void smp_init_pSeries(void);
void smp_init_cell(void);
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index f56a843f4705..36864364e601 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -277,7 +277,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
bne- 1b"
: "=&r"(tmp)
: "r"(&rw->lock)
- : "cr0", "memory");
+ : "cr0", "xer", "memory");
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index 45963e80f557..28f6ddbff4cf 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -5,6 +5,10 @@
#include <linux/stringify.h>
#include <asm/feature-fixups.h>
+#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
+#define __SUBARCH_HAS_LWSYNC
+#endif
+
#ifndef __ASSEMBLY__
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index d6648c143322..2a4be19a92c4 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -23,15 +23,17 @@
* read_barrier_depends() prevents data-dependent loads being reordered
* across this point (nop on PPC).
*
- * We have to use the sync instructions for mb(), since lwsync doesn't
- * order loads with respect to previous stores. Lwsync is fine for
- * rmb(), though. Note that rmb() actually uses a sync on 32-bit
- * architectures.
+ * *mb() variants without smp_ prefix must order all types of memory
+ * operations with one another. sync is the only instruction sufficient
+ * to do this.
*
- * For wmb(), we use sync since wmb is used in drivers to order
- * stores to system memory with respect to writes to the device.
- * However, smp_wmb() can be a lighter-weight lwsync or eieio barrier
- * on SMP since it is only used to order updates to system memory.
+ * For the smp_ barriers, ordering is for cacheable memory operations
+ * only. We have to use the sync instruction for smp_mb(), since lwsync
+ * doesn't order loads with respect to previous stores. Lwsync can be
+ * used for smp_rmb() and smp_wmb().
+ *
+ * However, on CPUs that don't support lwsync, lwsync actually maps to a
+ * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
*/
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
@@ -45,14 +47,14 @@
#ifdef CONFIG_SMP
#ifdef __SUBARCH_HAS_LWSYNC
-# define SMPWMB lwsync
+# define SMPWMB LWSYNC
#else
# define SMPWMB eieio
#endif
#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() __asm__ __volatile__ (__stringify(SMPWMB) : : :"memory")
+#define smp_rmb() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
+#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() barrier()
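
Taken together with the synch.h hunk above, the barrier story becomes: the full mb()/rmb()/wmb() keep using sync, smp_rmb() now always emits lwsync, and smp_wmb() emits lwsync where the sub-arch advertises it (__SUBARCH_HAS_LWSYNC, i.e. 64-bit or e500mc) and eieio elsewhere; per the new comment and the lwsync fixups declared in synch.h, lwsync falls back to a full sync on cores that lack the instruction. A compile-time sketch of just that selection, with stand-in toggles:

#include <stdio.h>

#define SMP 1			/* stand-in for CONFIG_SMP */
#define SUBARCH_HAS_LWSYNC 1	/* stand-in for __SUBARCH_HAS_LWSYNC */

#if SMP
# if SUBARCH_HAS_LWSYNC
#  define SMPWMB "lwsync"
# else
#  define SMPWMB "eieio"
# endif
# define SMP_MB  "sync"
# define SMP_RMB "lwsync"	/* fixed up to sync on cores without lwsync */
# define SMP_WMB SMPWMB
#else
# define SMP_MB  "compiler barrier only"
# define SMP_RMB "compiler barrier only"
# define SMP_WMB "compiler barrier only"
#endif

int main(void)
{
	printf("smp_mb  -> %s\n", SMP_MB);
	printf("smp_rmb -> %s\n", SMP_RMB);
	printf("smp_wmb -> %s\n", SMP_WMB);
	return 0;
}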
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index febd581ec9b0..27ccb764fdab 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -48,26 +48,6 @@ extern unsigned long ppc_proc_freq;
extern unsigned long ppc_tb_freq;
#define DEFAULT_TB_FREQ 125000000UL
-/*
- * By putting all of this stuff into a single struct we
- * reduce the number of cache lines touched by do_gettimeofday.
- * Both by collecting all of the data in one cache line and
- * by touching only one TOC entry on ppc64.
- */
-struct gettimeofday_vars {
- u64 tb_to_xs;
- u64 stamp_xsec;
- u64 tb_orig_stamp;
-};
-
-struct gettimeofday_struct {
- unsigned long tb_ticks_per_sec;
- struct gettimeofday_vars vars[2];
- struct gettimeofday_vars * volatile varp;
- unsigned var_idx;
- unsigned tb_to_us;
-};
-
struct div_result {
u64 result_high;
u64 result_low;
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index a2c6bfd85fb7..abbe3419d1dd 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -6,6 +6,9 @@
*
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
+ * - local_flush_tlb_mm(mm) flushes the specified mm context on
+ * the local processor
+ * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
* - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
@@ -17,7 +20,7 @@
*/
#ifdef __KERNEL__
-#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
+#ifdef CONFIG_PPC_MMU_NOHASH
/*
* TLB flushing for software loaded TLB chips
*
@@ -28,63 +31,49 @@
#include <linux/mm.h>
-extern void _tlbie(unsigned long address, unsigned int pid);
-extern void _tlbil_all(void);
-extern void _tlbil_pid(unsigned int pid);
-extern void _tlbil_va(unsigned long address, unsigned int pid);
+#define MMU_NO_CONTEXT ((unsigned int)-1)
-#if defined(CONFIG_40x) || defined(CONFIG_8xx)
-#define _tlbia() asm volatile ("tlbia; sync" : : : "memory")
-#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
-extern void _tlbia(void);
-#endif
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
- _tlbil_pid(mm->context.id);
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long vmaddr)
-{
- _tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0);
-}
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
- unsigned long vmaddr)
-{
- flush_tlb_page(vma, vmaddr);
-}
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- _tlbil_pid(vma->vm_mm->context.id);
-}
+#ifdef CONFIG_SMP
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+#else
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr)
+#endif
+#define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr)
-static inline void flush_tlb_kernel_range(unsigned long start,
- unsigned long end)
-{
- _tlbil_pid(0);
-}
+#elif defined(CONFIG_PPC_STD_MMU_32)
-#elif defined(CONFIG_PPC32)
/*
- * TLB flushing for "classic" hash-MMMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
*/
-extern void _tlbie(unsigned long address);
-extern void _tlbia(void);
-
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+ flush_tlb_page(vma, vmaddr);
+}
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ flush_tlb_mm(mm);
+}
+
+#elif defined(CONFIG_PPC_STD_MMU_64)
-#else
/*
- * TLB flushing for 64-bit has-MMU CPUs
+ * TLB flushing for 64-bit hash-MMU CPUs
*/
#include <linux/percpu.h>
@@ -134,10 +123,19 @@ extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
extern void flush_hash_range(unsigned long number, int local);
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+}
+
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}
+static inline void local_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long vmaddr)
+{
+}
+
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{
@@ -162,7 +160,8 @@ static inline void flush_tlb_kernel_range(unsigned long start,
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
unsigned long end);
-
+#else
+#error Unsupported MMU type
#endif
#endif /*__KERNEL__ */
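
The restructured nohash block leaves the choice between local and global flushes to the build: under CONFIG_SMP the flush_tlb_* entry points become out-of-line functions (implemented elsewhere in this series, not shown here) that must reach the other CPUs, while on UP they simply alias the local_ variants. A toy sketch of that shape:

#include <stdio.h>

#define SMP 0	/* stand-in for CONFIG_SMP; flip to 1 for the SMP mapping */

static void local_flush_tlb_mm(int context_id)
{
	printf("flush context %d on this cpu only\n", context_id);
}

#if SMP
/* Under SMP the flush has to be propagated to the other cpus as well. */
static void flush_tlb_mm(int context_id)
{
	printf("flush context %d on every cpu\n", context_id);
}
#else
/* On UP the global flush degenerates to the local one. */
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#endif

int main(void)
{
	flush_tlb_mm(42);
	return 0;
}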
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index f01393224b52..13c2c283e178 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -39,6 +39,7 @@
#ifndef __ASSEMBLY__
#include <linux/unistd.h>
+#include <linux/time.h>
#define SYSCALL_MAP_SIZE ((__NR_syscalls + 31) / 32)
@@ -83,6 +84,7 @@ struct vdso_data {
__u32 icache_log_block_size; /* L1 i-cache log block size */
__s32 wtom_clock_sec; /* Wall to monotonic clock */
__s32 wtom_clock_nsec;
+ struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
__u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
};
@@ -102,6 +104,7 @@ struct vdso_data {
__u32 tz_dsttime; /* Type of dst correction 0x5C */
__s32 wtom_clock_sec; /* Wall to monotonic clock */
__s32 wtom_clock_nsec;
+ struct timespec stamp_xtime; /* xtime as at tb_orig_stamp */
__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
__u32 dcache_block_size; /* L1 d-cache block size */
__u32 icache_block_size; /* L1 i-cache block size */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index d17edb4a2f9d..1308a86e9070 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -103,6 +103,10 @@ endif
obj-$(CONFIG_PPC64) += $(obj64-y)
+ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),)
+obj-y += ppc_save_regs.o
+endif
+
extra-$(CONFIG_PPC_FPU) += fpu.o
extra-$(CONFIG_PPC64) += entry_64.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 75c5dd0138fd..661d07d2146b 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -60,6 +60,7 @@ int main(void)
{
DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(MM, offsetof(struct task_struct, mm));
+ DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id));
#ifdef CONFIG_PPC64
DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
#else
@@ -306,6 +307,7 @@ int main(void)
DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32));
DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec));
DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
+ DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime));
DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size));
DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size));
DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size));
@@ -378,6 +380,10 @@ int main(void)
DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
#endif
+#ifdef CONFIG_44x
+ DEFINE(PGD_T_LOG2, PGD_T_LOG2);
+ DEFINE(PTE_T_LOG2, PTE_T_LOG2);
+#endif
return 0;
}
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 7e8719504f39..923f87aff20a 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -19,6 +19,7 @@
#include <asm/oprofile_impl.h>
#include <asm/cputable.h>
#include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */
+#include <asm/mmu.h>
struct cpu_spec* cur_cpu_spec = NULL;
EXPORT_SYMBOL(cur_cpu_spec);
@@ -94,6 +95,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER3 (630)",
.cpu_features = CPU_FTRS_POWER3,
.cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -109,6 +111,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER3 (630+)",
.cpu_features = CPU_FTRS_POWER3,
.cpu_user_features = COMMON_USER_PPC64|PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -124,6 +127,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "RS64-II (northstar)",
.cpu_features = CPU_FTRS_RS64,
.cpu_user_features = COMMON_USER_PPC64,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -139,6 +143,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "RS64-III (pulsar)",
.cpu_features = CPU_FTRS_RS64,
.cpu_user_features = COMMON_USER_PPC64,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -154,6 +159,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "RS64-III (icestar)",
.cpu_features = CPU_FTRS_RS64,
.cpu_user_features = COMMON_USER_PPC64,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -169,6 +175,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "RS64-IV (sstar)",
.cpu_features = CPU_FTRS_RS64,
.cpu_user_features = COMMON_USER_PPC64,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -184,6 +191,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER4 (gp)",
.cpu_features = CPU_FTRS_POWER4,
.cpu_user_features = COMMON_USER_POWER4,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -199,6 +207,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER4+ (gq)",
.cpu_features = CPU_FTRS_POWER4,
.cpu_user_features = COMMON_USER_POWER4,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -215,6 +224,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -233,6 +243,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -251,6 +262,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -269,6 +281,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -287,6 +300,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -303,6 +317,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER5 (gr)",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -323,6 +338,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER5+ (gs)",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5_PLUS,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -339,6 +355,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER5+ (gs)",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5_PLUS,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -356,6 +373,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER5+",
.cpu_features = CPU_FTRS_POWER5,
.cpu_user_features = COMMON_USER_POWER5_PLUS,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.machine_check = machine_check_generic,
@@ -369,6 +387,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_POWER6,
.cpu_user_features = COMMON_USER_POWER6 |
PPC_FEATURE_POWER6_EXT,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -388,6 +407,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER6 (architected)",
.cpu_features = CPU_FTRS_POWER6,
.cpu_user_features = COMMON_USER_POWER6,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.machine_check = machine_check_generic,
@@ -400,6 +420,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7 (architected)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.machine_check = machine_check_generic,
@@ -412,6 +433,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7 (raw)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -434,6 +456,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_PPC64 |
PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP |
PPC_FEATURE_SMT,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 4,
@@ -449,6 +472,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "PA6T",
.cpu_features = CPU_FTRS_PA6T,
.cpu_user_features = COMMON_USER_PA6T,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 64,
.dcache_bsize = 64,
.num_pmcs = 6,
@@ -466,6 +490,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER4 (compatible)",
.cpu_features = CPU_FTRS_COMPATIBLE,
.cpu_user_features = COMMON_USER_PPC64,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -483,6 +508,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC601,
.cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR |
PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_generic,
@@ -494,6 +520,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "603",
.cpu_features = CPU_FTRS_603,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = 0,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -506,6 +533,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "603e",
.cpu_features = CPU_FTRS_603,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = 0,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -518,6 +546,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "603ev",
.cpu_features = CPU_FTRS_603,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = 0,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -530,6 +559,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "604",
.cpu_features = CPU_FTRS_604,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 2,
@@ -543,6 +573,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "604e",
.cpu_features = CPU_FTRS_604,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -556,6 +587,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "604r",
.cpu_features = CPU_FTRS_604,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -569,6 +601,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "604ev",
.cpu_features = CPU_FTRS_604,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -582,6 +615,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "740/750",
.cpu_features = CPU_FTRS_740_NOTAU,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -595,6 +629,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750CX",
.cpu_features = CPU_FTRS_750,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -608,6 +643,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750CX",
.cpu_features = CPU_FTRS_750,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -622,6 +658,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750CXe",
.cpu_features = CPU_FTRS_750,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -636,6 +673,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750CXe",
.cpu_features = CPU_FTRS_750,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -650,6 +688,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750CL",
.cpu_features = CPU_FTRS_750CL,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -664,6 +703,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "745/755",
.cpu_features = CPU_FTRS_750,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -678,6 +718,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750FX",
.cpu_features = CPU_FTRS_750FX1,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -692,6 +733,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750FX",
.cpu_features = CPU_FTRS_750FX2,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -706,6 +748,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750FX",
.cpu_features = CPU_FTRS_750FX,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -720,6 +763,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "750GX",
.cpu_features = CPU_FTRS_750GX,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -734,6 +778,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "740/750",
.cpu_features = CPU_FTRS_740,
.cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -749,6 +794,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7400_NOTAU,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -764,6 +810,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7400,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -779,6 +826,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7400,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -794,6 +842,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7450_20,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -811,6 +860,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7450_21,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -828,6 +878,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7450_23,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -845,6 +896,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7455_1,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -862,6 +914,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7455_20,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -879,6 +932,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7455,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -896,6 +950,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7447_10,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -913,6 +968,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7447_10,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -929,6 +985,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "7447/7457",
.cpu_features = CPU_FTRS_7447,
.cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -946,6 +1003,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7447A,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -963,6 +1021,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_7448,
.cpu_user_features = COMMON_USER |
PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 6,
@@ -979,6 +1038,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "82xx",
.cpu_features = CPU_FTRS_82XX,
.cpu_user_features = COMMON_USER,
+ .mmu_features = 0,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -991,6 +1051,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "G2_LE",
.cpu_features = CPU_FTRS_G2_LE,
.cpu_user_features = COMMON_USER,
+ .mmu_features = MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -1003,6 +1064,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "e300c1",
.cpu_features = CPU_FTRS_E300,
.cpu_user_features = COMMON_USER,
+ .mmu_features = MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -1015,6 +1077,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "e300c2",
.cpu_features = CPU_FTRS_E300C2,
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .mmu_features = MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -1027,6 +1090,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "e300c3",
.cpu_features = CPU_FTRS_E300,
.cpu_user_features = COMMON_USER,
+ .mmu_features = MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -1041,6 +1105,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "e300c4",
.cpu_features = CPU_FTRS_E300,
.cpu_user_features = COMMON_USER,
+ .mmu_features = MMU_FTR_USE_HIGH_BATS,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
@@ -1056,6 +1121,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "(generic PPC)",
.cpu_features = CPU_FTRS_CLASSIC32,
.cpu_user_features = COMMON_USER,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_generic,
@@ -1071,6 +1137,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
* if the 8xx code is there.... */
.cpu_features = CPU_FTRS_8XX,
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .mmu_features = MMU_FTR_TYPE_8xx,
.icache_bsize = 16,
.dcache_bsize = 16,
.platform = "ppc823",
@@ -1083,6 +1150,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "403GC",
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 16,
.dcache_bsize = 16,
.machine_check = machine_check_4xx,
@@ -1095,6 +1163,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 16,
.dcache_bsize = 16,
.machine_check = machine_check_4xx,
@@ -1106,6 +1175,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "403G ??",
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 16,
.dcache_bsize = 16,
.machine_check = machine_check_4xx,
@@ -1118,6 +1188,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1130,6 +1201,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1142,6 +1214,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1154,6 +1227,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1166,6 +1240,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1178,6 +1253,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1190,6 +1266,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1202,6 +1279,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1213,6 +1291,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "405LP",
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1225,6 +1304,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1237,6 +1317,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1249,6 +1330,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1261,6 +1343,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1273,6 +1356,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1286,6 +1370,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1298,6 +1383,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_40X,
.cpu_user_features = PPC_FEATURE_32 |
PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1312,6 +1398,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GR Rev. A",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1323,6 +1410,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440EP Rev. A",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440ep,
@@ -1335,6 +1423,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GR Rev. B",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1346,6 +1435,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440EP Rev. C",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440ep,
@@ -1358,6 +1448,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440EP Rev. B",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440ep,
@@ -1370,6 +1461,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GRX",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440grx,
@@ -1382,6 +1474,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440EPX",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440epx,
@@ -1394,6 +1487,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GP Rev. B",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1405,6 +1499,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GP Rev. C",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1416,6 +1511,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GX Rev. A",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440gx,
@@ -1428,6 +1524,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GX Rev. B",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440gx,
@@ -1440,6 +1537,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GX Rev. C",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440gx,
@@ -1452,6 +1550,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440GX Rev. F",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440gx,
@@ -1464,6 +1563,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440SP Rev. A",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1475,6 +1575,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440SPe Rev. A",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440spe,
@@ -1487,6 +1588,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440SPe Rev. B",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440spe,
@@ -1499,6 +1601,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "440 in Virtex-5 FXT",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_440x5,
@@ -1509,8 +1612,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pvr_mask = 0xffff0002,
.pvr_value = 0x13020002,
.cpu_name = "460EX",
- .cpu_features = CPU_FTRS_44X,
+ .cpu_features = CPU_FTRS_440x6,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_460ex,
@@ -1521,8 +1625,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pvr_mask = 0xffff0002,
.pvr_value = 0x13020000,
.cpu_name = "460GT",
- .cpu_features = CPU_FTRS_44X,
+ .cpu_features = CPU_FTRS_440x6,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_460gt,
@@ -1535,6 +1640,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "(generic 44x PPC)",
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_4xx,
@@ -1551,6 +1657,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_EFP_SINGLE |
PPC_FEATURE_UNIFIED_CACHE,
+ .mmu_features = MMU_FTR_TYPE_FSL_E,
.dcache_bsize = 32,
.machine_check = machine_check_e200,
.platform = "ppc5554",
@@ -1565,6 +1672,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_SPE_COMP |
PPC_FEATURE_HAS_EFP_SINGLE_COMP |
PPC_FEATURE_UNIFIED_CACHE,
+ .mmu_features = MMU_FTR_TYPE_FSL_E,
.dcache_bsize = 32,
.machine_check = machine_check_e200,
.platform = "ppc5554",
@@ -1577,6 +1685,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_EFP_SINGLE |
PPC_FEATURE_UNIFIED_CACHE,
+ .mmu_features = MMU_FTR_TYPE_FSL_E,
.dcache_bsize = 32,
.machine_check = machine_check_e200,
.platform = "ppc5554",
@@ -1591,6 +1700,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_SPE_COMP |
PPC_FEATURE_HAS_EFP_SINGLE_COMP,
+ .mmu_features = MMU_FTR_TYPE_FSL_E,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -1608,6 +1718,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
PPC_FEATURE_HAS_SPE_COMP |
PPC_FEATURE_HAS_EFP_SINGLE_COMP |
PPC_FEATURE_HAS_EFP_DOUBLE_COMP,
+ .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS,
.icache_bsize = 32,
.dcache_bsize = 32,
.num_pmcs = 4,
@@ -1622,6 +1733,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "e500mc",
.cpu_features = CPU_FTRS_E500MC,
.cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS,
.icache_bsize = 64,
.dcache_bsize = 64,
.num_pmcs = 4,
@@ -1638,6 +1750,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_SPE_COMP |
PPC_FEATURE_HAS_EFP_SINGLE_COMP,
+ .mmu_features = MMU_FTR_TYPE_FSL_E,
.icache_bsize = 32,
.dcache_bsize = 32,
.machine_check = machine_check_e500,
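
The new .mmu_features masks are meant to be tested at runtime much like the existing CPU feature masks. A minimal sketch, assuming an mmu_has_feature() helper analogous to cpu_has_feature() (the helper and the setup functions below are illustrative, not taken from this hunk):

	if (mmu_has_feature(MMU_FTR_TYPE_44x))
		setup_44x_mmu();		/* hypothetical per-family setup */
	else if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		setup_fsl_booke_mmu();		/* hypothetical per-family setup */
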
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 3a6eaa876ee1..1c5c8a6fc129 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -120,6 +120,26 @@ static inline void dma_direct_unmap_page(struct device *dev,
{
}
+#ifdef CONFIG_NOT_COHERENT_CACHE
+static inline void dma_direct_sync_sg(struct device *dev,
+ struct scatterlist *sgl, int nents,
+ enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+}
+
+static inline void dma_direct_sync_single_range(struct device *dev,
+ dma_addr_t dma_handle, unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ __dma_sync(bus_to_virt(dma_handle+offset), size, direction);
+}
+#endif
+
struct dma_mapping_ops dma_direct_ops = {
.alloc_coherent = dma_direct_alloc_coherent,
.free_coherent = dma_direct_free_coherent,
@@ -128,5 +148,11 @@ struct dma_mapping_ops dma_direct_ops = {
.dma_supported = dma_direct_dma_supported,
.map_page = dma_direct_map_page,
.unmap_page = dma_direct_unmap_page,
+#ifdef CONFIG_NOT_COHERENT_CACHE
+ .sync_single_range_for_cpu = dma_direct_sync_single_range,
+ .sync_single_range_for_device = dma_direct_sync_single_range,
+ .sync_sg_for_cpu = dma_direct_sync_sg,
+ .sync_sg_for_device = dma_direct_sync_sg,
+#endif
};
EXPORT_SYMBOL(dma_direct_ops);
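
On non-cache-coherent parts these hooks make the generic streaming DMA sync calls reach __dma_sync()/__dma_sync_page(). A minimal driver-side sketch using only the standard DMA API (the device, buffer and length names are made up):

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	/* ... device writes into the buffer via DMA ... */
	dma_sync_single_range_for_cpu(dev, handle, 0, len, DMA_FROM_DEVICE);
	/* CPU may now read buf safely */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
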
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 0c326823c6d4..a1c4cfd25ded 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -31,6 +31,7 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
+#include <asm/bug.h>
/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB) \
@@ -182,7 +183,8 @@ __after_mmu_off:
bl reloc_offset
mr r26,r3
addis r4,r3,KERNELBASE@h /* current address of _start */
- cmpwi 0,r4,0 /* are we already running at 0? */
+ lis r5,PHYSICAL_START@h
+ cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
bne relocate_kernel
/*
* we now have the 1st 16M of ram mapped with the bats.
@@ -810,13 +812,13 @@ giveup_altivec:
/*
* This code is jumped to from the startup code to copy
- * the kernel image to physical address 0.
+ * the kernel image to physical address PHYSICAL_START.
*/
relocate_kernel:
addis r9,r26,klimit@ha /* fetch klimit */
lwz r25,klimit@l(r9)
addis r25,r25,-KERNELBASE@h
- li r3,0 /* Destination base address */
+ lis r3,PHYSICAL_START@h /* Destination base address */
li r6,0 /* Destination offset */
li r5,0x4000 /* # bytes of memory to copy */
bl copy_and_flush /* copy the first 0x4000 bytes */
@@ -989,12 +991,12 @@ load_up_mmu:
LOAD_BAT(1,r3,r4,r5)
LOAD_BAT(2,r3,r4,r5)
LOAD_BAT(3,r3,r4,r5)
-BEGIN_FTR_SECTION
+BEGIN_MMU_FTR_SECTION
LOAD_BAT(4,r3,r4,r5)
LOAD_BAT(5,r3,r4,r5)
LOAD_BAT(6,r3,r4,r5)
LOAD_BAT(7,r3,r4,r5)
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blr
/*
@@ -1070,9 +1072,14 @@ start_here:
RFI
/*
+ * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
+ *
* Set up the segment registers for a new context.
*/
-_ENTRY(set_context)
+_ENTRY(switch_mmu_context)
+ lwz r3,MMCONTEXTID(r4)
+ cmpwi cr0,r3,0
+ blt- 4f
mulli r3,r3,897 /* multiply context by skew factor */
rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
addis r3,r3,0x6000 /* Set Ks, Ku bits */
@@ -1083,6 +1090,7 @@ _ENTRY(set_context)
/* Context switch the PTE pointer for the Abatron BDI2000.
* The PGDIR is passed as second argument.
*/
+ lwz r4,MM_PGD(r4)
lis r5, KERNELBASE@h
lwz r5, 0xf0(r5)
stw r4, 0x4(r5)
@@ -1098,6 +1106,9 @@ _ENTRY(set_context)
sync
isync
blr
+4: trap
+ EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
+ blr
/*
* An undocumented "feature" of 604e requires that the v bit
@@ -1131,7 +1142,7 @@ clear_bats:
mtspr SPRN_IBAT2L,r10
mtspr SPRN_IBAT3U,r10
mtspr SPRN_IBAT3L,r10
-BEGIN_FTR_SECTION
+BEGIN_MMU_FTR_SECTION
 	/* Here's a tweak: at this point, CPU setup has
* not been called yet, so HIGH_BAT_EN may not be
* set in HID0 for the 745x processors. However, it
@@ -1154,7 +1165,7 @@ BEGIN_FTR_SECTION
mtspr SPRN_IBAT6L,r10
mtspr SPRN_IBAT7U,r10
mtspr SPRN_IBAT7L,r10
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blr
flush_tlbs:
@@ -1178,11 +1189,11 @@ mmu_off:
/*
* Use the first pair of BAT registers to map the 1st 16MB
- * of RAM to KERNELBASE. From this point on we can't safely
+ * of RAM to PAGE_OFFSET. From this point on we can't safely
* call OF any more.
*/
initial_bats:
- lis r11,KERNELBASE@h
+ lis r11,PAGE_OFFSET@h
mfspr r9,SPRN_PVR
rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
cmpwi 0,r9,1
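
switch_mmu_context() now takes both mm_structs and pulls the context id and pgd out of the next mm itself, trapping if the id is still unallocated (negative). A rough sketch of the expected C call site, assuming the usual switch_mm() shape (the surrounding context-allocation code belongs to the wider series, not this hunk):

	static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
				     struct task_struct *tsk)
	{
		/* next->context.id must be allocated by now, or the
		 * trap added above fires */
		switch_mmu_context(prev, next);
	}
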
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index f3a1ea9d7fe4..b56fecc93a16 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -69,6 +69,17 @@ _ENTRY(_start);
li r24,0 /* CPU number */
/*
+ * In case the firmware didn't do it, we apply some workarounds
+ * that are good for all 440 core variants here
+ */
+ mfspr r3,SPRN_CCR0
+ rlwinm r3,r3,0,0,27 /* disable icache prefetch */
+ isync
+ mtspr SPRN_CCR0,r3
+ isync
+ sync
+
+/*
* Set up the initial MMU state
*
* We are still executing code at the virtual address
@@ -391,12 +402,14 @@ interrupt_base:
rlwimi r13,r12,10,30,30
/* Load the PTE */
- rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
+ /* Compute pgdir/pmd offset */
+ rlwinm r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
lwzx r11, r12, r11 /* Get pgd/pmd entry */
rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
beq 2f /* Bail if no table */
- rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
+ /* Compute pte address */
+ rlwimi r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
lwz r11, 0(r12) /* Get high word of pte entry */
lwz r12, 4(r12) /* Get low word of pte entry */
@@ -485,12 +498,14 @@ tlb_44x_patch_hwater_D:
/* Make up the required permissions */
li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_HWEXEC
- rlwinm r12, r10, 13, 19, 29 /* Compute pgdir/pmd offset */
+ /* Compute pgdir/pmd offset */
+ rlwinm r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
lwzx r11, r12, r11 /* Get pgd/pmd entry */
rlwinm. r12, r11, 0, 0, 20 /* Extract pt base address */
beq 2f /* Bail if no table */
- rlwimi r12, r10, 23, 20, 28 /* Compute pte address */
+ /* Compute pte address */
+ rlwimi r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
lwz r11, 0(r12) /* Get high word of pte entry */
lwz r12, 4(r12) /* Get low word of pte entry */
@@ -554,15 +569,16 @@ tlb_44x_patch_hwater_I:
*/
finish_tlb_load:
 	/* Combine RPN & ERPN and write WS 0 */
- rlwimi r11,r12,0,0,19
+ rlwimi r11,r12,0,0,31-PAGE_SHIFT
tlbwe r11,r13,PPC44x_TLB_XLAT
/*
* Create WS1. This is the faulting address (EPN),
* page size, and valid flag.
*/
- li r11,PPC44x_TLB_VALID | PPC44x_TLB_4K
- rlwimi r10,r11,0,20,31 /* Insert valid and page size*/
+ li r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
+ /* Insert valid and page size */
+ rlwimi r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
tlbwe r10,r13,PPC44x_TLB_PAGEID /* Write PAGEID */
/* And WS 2 */
@@ -634,12 +650,12 @@ _GLOBAL(set_context)
* goes at the beginning of the data segment, which is page-aligned.
*/
.data
- .align 12
+ .align PAGE_SHIFT
.globl sdata
sdata:
.globl empty_zero_page
empty_zero_page:
- .space 4096
+ .space PAGE_SIZE
/*
* To support >32-bit physical addresses, we use an 8KB pgdir.
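
The CCR0 fixup above is a read-modify-write that keeps bits 0..27 and clears the low nibble. An equivalent C-level sketch with the generic SPR accessors, for reference only (in practice this has to run this early in assembly):

	/* rlwinm r3,r3,0,0,27 == clear CCR0[28:31]; per the comment above,
	 * this disables the 440 icache prefetch */
	mtspr(SPRN_CCR0, mfspr(SPRN_CCR0) & ~0xful);
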
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 590304c24dad..11b549acc034 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -92,6 +92,7 @@ _ENTRY(_start);
* if needed
*/
+_ENTRY(__early_start)
/* 1. Find the index of the entry we're executing in */
bl invstr /* Find our address */
invstr: mflr r6 /* Make it accessible */
@@ -235,36 +236,40 @@ skpinv: addi r6,r6,1 /* Increment */
tlbivax 0,r9
TLBSYNC
+/* The mapping only needs to be cache-coherent on SMP */
+#ifdef CONFIG_SMP
+#define M_IF_SMP MAS2_M
+#else
+#define M_IF_SMP 0
+#endif
+
/* 6. Setup KERNELBASE mapping in TLB1[0] */
lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
mtspr SPRN_MAS0,r6
lis r6,(MAS1_VALID|MAS1_IPROT)@h
ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_64M))@l
mtspr SPRN_MAS1,r6
- li r7,0
- lis r6,PAGE_OFFSET@h
- ori r6,r6,PAGE_OFFSET@l
- rlwimi r6,r7,0,20,31
+ lis r6,MAS2_VAL(PAGE_OFFSET, BOOKE_PAGESZ_64M, M_IF_SMP)@h
+ ori r6,r6,MAS2_VAL(PAGE_OFFSET, BOOKE_PAGESZ_64M, M_IF_SMP)@l
mtspr SPRN_MAS2,r6
mtspr SPRN_MAS3,r8
tlbwe
/* 7. Jump to KERNELBASE mapping */
- lis r6,KERNELBASE@h
- ori r6,r6,KERNELBASE@l
- rlwimi r6,r7,0,20,31
+ lis r6,(KERNELBASE & ~0xfff)@h
+ ori r6,r6,(KERNELBASE & ~0xfff)@l
lis r7,MSR_KERNEL@h
ori r7,r7,MSR_KERNEL@l
bl 1f /* Find our address */
1: mflr r9
rlwimi r6,r9,0,20,31
- addi r6,r6,24
+ addi r6,r6,(2f - 1b)
mtspr SPRN_SRR0,r6
mtspr SPRN_SRR1,r7
rfi /* start execution out of TLB1[0] entry */
/* 8. Clear out the temp mapping */
- lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
+2: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
mtspr SPRN_MAS0,r7
tlbre
@@ -344,6 +349,15 @@ skpinv: addi r6,r6,1 /* Increment */
mtspr SPRN_DBSR,r2
#endif
+#ifdef CONFIG_SMP
+ /* Check to see if we're the second processor, and jump
+ * to the secondary_start code if so
+ */
+ mfspr r24,SPRN_PIR
+ cmpwi r24,0
+ bne __secondary_start
+#endif
+
/*
* This is where the main kernel code starts.
*/
@@ -685,12 +699,13 @@ interrupt_base:
/* SPE Floating Point Data */
#ifdef CONFIG_SPE
EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE);
-#else
- EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
-#endif /* CONFIG_SPE */
/* SPE Floating Point Round */
+ EXCEPTION(0x2050, SPEFloatingPointRound, SPEFloatingPointRoundException, EXC_XFER_EE)
+#else
+ EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE)
EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE)
+#endif /* CONFIG_SPE */
/* Performance Monitor */
EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD)
@@ -735,6 +750,9 @@ finish_tlb_load:
#else
rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */
#endif
+#ifdef CONFIG_SMP
+ ori r12, r12, MAS2_M
+#endif
mtspr SPRN_MAS2, r12
li r10, (_PAGE_HWEXEC | _PAGE_PRESENT)
@@ -746,15 +764,15 @@ finish_tlb_load:
iseleq r12, r12, r10
#ifdef CONFIG_PTE_64BIT
-2: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */
+ rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */
rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */
mtspr SPRN_MAS3, r12
-BEGIN_FTR_SECTION
+BEGIN_MMU_FTR_SECTION
srwi r10, r13, 8 /* grab RPN[8:31] */
mtspr SPRN_MAS7, r10
-END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
#else
-2: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */
+ rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */
mtspr SPRN_MAS3, r11
#endif
#ifdef CONFIG_E200
@@ -1037,6 +1055,63 @@ _GLOBAL(flush_dcache_L1)
blr
+#ifdef CONFIG_SMP
+/* When we get here, r24 needs to hold the CPU # */
+ .globl __secondary_start
+__secondary_start:
+ lis r3,__secondary_hold_acknowledge@h
+ ori r3,r3,__secondary_hold_acknowledge@l
+ stw r24,0(r3)
+
+ li r3,0
+ mr r4,r24 /* Why? */
+ bl call_setup_cpu
+
+ lis r3,tlbcam_index@ha
+ lwz r3,tlbcam_index@l(r3)
+ mtctr r3
+ li r26,0 /* r26 safe? */
+
+ /* Load each CAM entry */
+1: mr r3,r26
+ bl loadcam_entry
+ addi r26,r26,1
+ bdnz 1b
+
+ /* get current_thread_info and current */
+ lis r1,secondary_ti@ha
+ lwz r1,secondary_ti@l(r1)
+ lwz r2,TI_TASK(r1)
+
+ /* stack */
+ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+ li r0,0
+ stw r0,0(r1)
+
+ /* ptr to current thread */
+ addi r4,r2,THREAD /* address of our thread_struct */
+ mtspr SPRN_SPRG3,r4
+
+ /* Setup the defaults for TLB entries */
+ li r4,(MAS4_TSIZED(BOOKE_PAGESZ_4K))@l
+ mtspr SPRN_MAS4,r4
+
+ /* Jump to start_secondary */
+ lis r4,MSR_KERNEL@h
+ ori r4,r4,MSR_KERNEL@l
+ lis r3,start_secondary@h
+ ori r3,r3,start_secondary@l
+ mtspr SPRN_SRR0,r3
+ mtspr SPRN_SRR1,r4
+ sync
+ rfi
+ sync
+
+ .globl __secondary_hold_acknowledge
+__secondary_hold_acknowledge:
+ .long -1
+#endif
+
/*
* We put a few things here that have to be page-aligned. This stuff
* goes at the beginning of the data segment, which is page-aligned.
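
__secondary_hold_acknowledge lets the boot CPU observe that a secondary reached __secondary_start. A hedged sketch of the kind of wait loop the e500 SMP platform code would pair with it (the helper below is hypothetical and not part of this hunk):

	extern volatile int __secondary_hold_acknowledge;

	static int wait_for_secondary(int nr, unsigned long spins)
	{
		/* the secondary stores its PIR-derived CPU number here early on */
		while (__secondary_hold_acknowledge != nr && --spins)
			barrier();
		return spins ? 0 : -ENODEV;
	}
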
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index 64299d28f364..6e3f62493659 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -47,7 +47,7 @@
#include <asm/abs_addr.h>
static struct device ibmebus_bus_device = { /* fake "parent" device */
- .bus_id = "ibmebus",
+ .init_name = "ibmebus",
};
struct bus_type ibmebus_bus_type;
@@ -231,6 +231,7 @@ void ibmebus_free_irq(u32 ist, void *dev_id)
unsigned int irq = irq_find_mapping(NULL, ist);
free_irq(irq, dev_id);
+ irq_dispose_mapping(irq);
}
EXPORT_SYMBOL(ibmebus_free_irq);
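
The added irq_dispose_mapping() balances the irq_create_mapping() done when the interrupt was requested, so the virq mapping is torn down together with the handler. A minimal sketch of the pairing (the request-side lines are an assumption, not quoted from this file):

	unsigned int irq = irq_create_mapping(NULL, ist);	/* request side */
	request_irq(irq, handler, 0, "ebus-dev", dev_id);

	free_irq(irq, dev_id);					/* release side */
	irq_dispose_mapping(irq);				/* new in this patch */
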
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index ac2a21f45c75..b3abebb7ee64 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -13,13 +13,17 @@
#include <linux/reboot.h>
#include <linux/threads.h>
#include <linux/lmb.h>
+#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/prom.h>
+#include <asm/sections.h>
void machine_crash_shutdown(struct pt_regs *regs)
{
if (ppc_md.machine_crash_shutdown)
ppc_md.machine_crash_shutdown(regs);
+ else
+ default_machine_crash_shutdown(regs);
}
/*
@@ -31,11 +35,8 @@ int machine_kexec_prepare(struct kimage *image)
{
if (ppc_md.machine_kexec_prepare)
return ppc_md.machine_kexec_prepare(image);
- /*
- * Fail if platform doesn't provide its own machine_kexec_prepare
- * implementation.
- */
- return -ENOSYS;
+ else
+ return default_machine_kexec_prepare(image);
}
void machine_kexec_cleanup(struct kimage *image)
@@ -52,13 +53,11 @@ void machine_kexec(struct kimage *image)
{
if (ppc_md.machine_kexec)
ppc_md.machine_kexec(image);
- else {
- /*
- * Fall back to normal restart if platform doesn't provide
- * its own kexec function, and user insist to kexec...
- */
- machine_restart(NULL);
- }
+ else
+ default_machine_kexec(image);
+
+ /* Fall back to normal restart if we're still alive. */
+ machine_restart(NULL);
for(;;);
}
@@ -118,3 +117,71 @@ int overlaps_crashkernel(unsigned long start, unsigned long size)
{
return (start + size) > crashk_res.start && start <= crashk_res.end;
}
+
+/* Values we need to export to the second kernel via the device tree. */
+static unsigned long kernel_end;
+static unsigned long crashk_size;
+
+static struct property kernel_end_prop = {
+ .name = "linux,kernel-end",
+ .length = sizeof(unsigned long),
+ .value = &kernel_end,
+};
+
+static struct property crashk_base_prop = {
+ .name = "linux,crashkernel-base",
+ .length = sizeof(unsigned long),
+ .value = &crashk_res.start,
+};
+
+static struct property crashk_size_prop = {
+ .name = "linux,crashkernel-size",
+ .length = sizeof(unsigned long),
+ .value = &crashk_size,
+};
+
+static void __init export_crashk_values(struct device_node *node)
+{
+ struct property *prop;
+
+ /* There might be existing crash kernel properties, but we can't
+ * be sure what's in them, so remove them. */
+ prop = of_find_property(node, "linux,crashkernel-base", NULL);
+ if (prop)
+ prom_remove_property(node, prop);
+
+ prop = of_find_property(node, "linux,crashkernel-size", NULL);
+ if (prop)
+ prom_remove_property(node, prop);
+
+ if (crashk_res.start != 0) {
+ prom_add_property(node, &crashk_base_prop);
+ crashk_size = crashk_res.end - crashk_res.start + 1;
+ prom_add_property(node, &crashk_size_prop);
+ }
+}
+
+static int __init kexec_setup(void)
+{
+ struct device_node *node;
+ struct property *prop;
+
+ node = of_find_node_by_path("/chosen");
+ if (!node)
+ return -ENOENT;
+
+ /* remove any stale properties so ours can be found */
+ prop = of_find_property(node, kernel_end_prop.name, NULL);
+ if (prop)
+ prom_remove_property(node, prop);
+
+ /* information needed by userspace when using default_machine_kexec */
+ kernel_end = __pa(_end);
+ prom_add_property(node, &kernel_end_prop);
+
+ export_crashk_values(node);
+
+ of_node_put(node);
+ return 0;
+}
+late_initcall(kexec_setup);
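
The values exported here land under /chosen, where kexec userspace or the next kernel can find them. A small sketch of a hypothetical in-kernel consumer using the standard OF accessors:

	struct device_node *chosen = of_find_node_by_path("/chosen");
	const unsigned long *kend;

	if (chosen) {
		kend = of_get_property(chosen, "linux,kernel-end", NULL);
		if (kend)
			pr_debug("kernel ends at 0x%lx\n", *kend);
		of_node_put(chosen);
	}
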
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 3c4ca046e854..49e705fcee6d 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -289,7 +289,7 @@ void default_machine_kexec(struct kimage *image)
}
/* Values we need to export to the second kernel via the device tree. */
-static unsigned long htab_base, kernel_end;
+static unsigned long htab_base;
static struct property htab_base_prop = {
.name = "linux,htab-base",
@@ -303,25 +303,20 @@ static struct property htab_size_prop = {
.value = &htab_size_bytes,
};
-static struct property kernel_end_prop = {
- .name = "linux,kernel-end",
- .length = sizeof(unsigned long),
- .value = &kernel_end,
-};
-
-static void __init export_htab_values(void)
+static int __init export_htab_values(void)
{
struct device_node *node;
struct property *prop;
+	/* On machines with no htab, htab_address is NULL */
+ if (!htab_address)
+ return -ENODEV;
+
node = of_find_node_by_path("/chosen");
if (!node)
- return;
+ return -ENODEV;
 	/* remove any stale properties so ours can be found */
- prop = of_find_property(node, kernel_end_prop.name, NULL);
- if (prop)
- prom_remove_property(node, prop);
prop = of_find_property(node, htab_base_prop.name, NULL);
if (prop)
prom_remove_property(node, prop);
@@ -329,68 +324,11 @@ static void __init export_htab_values(void)
if (prop)
prom_remove_property(node, prop);
- /* information needed by userspace when using default_machine_kexec */
- kernel_end = __pa(_end);
- prom_add_property(node, &kernel_end_prop);
-
- /* On machines with no htab htab_address is NULL */
- if (NULL == htab_address)
- goto out;
-
htab_base = __pa(htab_address);
prom_add_property(node, &htab_base_prop);
prom_add_property(node, &htab_size_prop);
- out:
- of_node_put(node);
-}
-
-static struct property crashk_base_prop = {
- .name = "linux,crashkernel-base",
- .length = sizeof(unsigned long),
- .value = &crashk_res.start,
-};
-
-static unsigned long crashk_size;
-
-static struct property crashk_size_prop = {
- .name = "linux,crashkernel-size",
- .length = sizeof(unsigned long),
- .value = &crashk_size,
-};
-
-static void __init export_crashk_values(void)
-{
- struct device_node *node;
- struct property *prop;
-
- node = of_find_node_by_path("/chosen");
- if (!node)
- return;
-
- /* There might be existing crash kernel properties, but we can't
- * be sure what's in them, so remove them. */
- prop = of_find_property(node, "linux,crashkernel-base", NULL);
- if (prop)
- prom_remove_property(node, prop);
-
- prop = of_find_property(node, "linux,crashkernel-size", NULL);
- if (prop)
- prom_remove_property(node, prop);
-
- if (crashk_res.start != 0) {
- prom_add_property(node, &crashk_base_prop);
- crashk_size = crashk_res.end - crashk_res.start + 1;
- prom_add_property(node, &crashk_size_prop);
- }
-
of_node_put(node);
-}
-
-static int __init kexec_setup(void)
-{
- export_htab_values();
- export_crashk_values();
return 0;
}
-__initcall(kexec_setup);
+late_initcall(export_htab_values);
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 5c33bc14bd9f..15f28e0de78d 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -29,6 +29,7 @@
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>
+#include <asm/bug.h>
.text
@@ -271,231 +272,6 @@ _GLOBAL(real_writeb)
#endif /* CONFIG_40x */
-/*
- * Flush MMU TLB
- */
-#ifndef CONFIG_FSL_BOOKE
-_GLOBAL(_tlbil_all)
-_GLOBAL(_tlbil_pid)
-#endif
-_GLOBAL(_tlbia)
-#if defined(CONFIG_40x)
- sync /* Flush to memory before changing mapping */
- tlbia
- isync /* Flush shadow TLB */
-#elif defined(CONFIG_44x)
- li r3,0
- sync
-
- /* Load high watermark */
- lis r4,tlb_44x_hwater@ha
- lwz r5,tlb_44x_hwater@l(r4)
-
-1: tlbwe r3,r3,PPC44x_TLB_PAGEID
- addi r3,r3,1
- cmpw 0,r3,r5
- ble 1b
-
- isync
-#elif defined(CONFIG_FSL_BOOKE)
- /* Invalidate all entries in TLB0 */
- li r3, 0x04
- tlbivax 0,3
- /* Invalidate all entries in TLB1 */
- li r3, 0x0c
- tlbivax 0,3
- msync
-#ifdef CONFIG_SMP
- tlbsync
-#endif /* CONFIG_SMP */
-#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
-#if defined(CONFIG_SMP)
- rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
- lwz r8,TI_CPU(r8)
- oris r8,r8,10
- mfmsr r10
- SYNC
- rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
- rlwinm r0,r0,0,28,26 /* clear DR */
- mtmsr r0
- SYNC_601
- isync
- lis r9,mmu_hash_lock@h
- ori r9,r9,mmu_hash_lock@l
- tophys(r9,r9)
-10: lwarx r7,0,r9
- cmpwi 0,r7,0
- bne- 10b
- stwcx. r8,0,r9
- bne- 10b
- sync
- tlbia
- sync
- TLBSYNC
- li r0,0
- stw r0,0(r9) /* clear mmu_hash_lock */
- mtmsr r10
- SYNC_601
- isync
-#else /* CONFIG_SMP */
- sync
- tlbia
- sync
-#endif /* CONFIG_SMP */
-#endif /* ! defined(CONFIG_40x) */
- blr
-
-/*
- * Flush MMU TLB for a particular address
- */
-#ifndef CONFIG_FSL_BOOKE
-_GLOBAL(_tlbil_va)
-#endif
-_GLOBAL(_tlbie)
-#if defined(CONFIG_40x)
- /* We run the search with interrupts disabled because we have to change
- * the PID and I don't want to preempt when that happens.
- */
- mfmsr r5
- mfspr r6,SPRN_PID
- wrteei 0
- mtspr SPRN_PID,r4
- tlbsx. r3, 0, r3
- mtspr SPRN_PID,r6
- wrtee r5
- bne 10f
- sync
- /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
- * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
- * the TLB entry. */
- tlbwe r3, r3, TLB_TAG
- isync
-10:
-
-#elif defined(CONFIG_44x)
- mfspr r5,SPRN_MMUCR
- rlwimi r5,r4,0,24,31 /* Set TID */
-
- /* We have to run the search with interrupts disabled, even critical
- * and debug interrupts (in fact the only critical exceptions we have
- * are debug and machine check). Otherwise an interrupt which causes
- * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
- mfmsr r4
- lis r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
- addi r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
- andc r6,r4,r6
- mtmsr r6
- mtspr SPRN_MMUCR,r5
- tlbsx. r3, 0, r3
- mtmsr r4
- bne 10f
- sync
- /* There are only 64 TLB entries, so r3 < 64,
- * which means bit 22, is clear. Since 22 is
- * the V bit in the TLB_PAGEID, loading this
- * value will invalidate the TLB entry.
- */
- tlbwe r3, r3, PPC44x_TLB_PAGEID
- isync
-10:
-#elif defined(CONFIG_FSL_BOOKE)
- rlwinm r4, r3, 0, 0, 19
- ori r5, r4, 0x08 /* TLBSEL = 1 */
- tlbivax 0, r4
- tlbivax 0, r5
- msync
-#if defined(CONFIG_SMP)
- tlbsync
-#endif /* CONFIG_SMP */
-#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
-#if defined(CONFIG_SMP)
- rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
- lwz r8,TI_CPU(r8)
- oris r8,r8,11
- mfmsr r10
- SYNC
- rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
- rlwinm r0,r0,0,28,26 /* clear DR */
- mtmsr r0
- SYNC_601
- isync
- lis r9,mmu_hash_lock@h
- ori r9,r9,mmu_hash_lock@l
- tophys(r9,r9)
-10: lwarx r7,0,r9
- cmpwi 0,r7,0
- bne- 10b
- stwcx. r8,0,r9
- bne- 10b
- eieio
- tlbie r3
- sync
- TLBSYNC
- li r0,0
- stw r0,0(r9) /* clear mmu_hash_lock */
- mtmsr r10
- SYNC_601
- isync
-#else /* CONFIG_SMP */
- tlbie r3
- sync
-#endif /* CONFIG_SMP */
-#endif /* ! CONFIG_40x */
- blr
-
-#if defined(CONFIG_FSL_BOOKE)
-/*
- * Flush MMU TLB, but only on the local processor (no broadcast)
- */
-_GLOBAL(_tlbil_all)
-#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
- MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
- li r3,(MMUCSR0_TLBFI)@l
- mtspr SPRN_MMUCSR0, r3
-1:
- mfspr r3,SPRN_MMUCSR0
- andi. r3,r3,MMUCSR0_TLBFI@l
- bne 1b
- blr
-
-/*
- * Flush MMU TLB for a particular process id, but only on the local processor
- * (no broadcast)
- */
-_GLOBAL(_tlbil_pid)
-/* we currently do an invalidate all since we don't have per pid invalidate */
- li r3,(MMUCSR0_TLBFI)@l
- mtspr SPRN_MMUCSR0, r3
-1:
- mfspr r3,SPRN_MMUCSR0
- andi. r3,r3,MMUCSR0_TLBFI@l
- bne 1b
- msync
- isync
- blr
-
-/*
- * Flush MMU TLB for a particular address, but only on the local processor
- * (no broadcast)
- */
-_GLOBAL(_tlbil_va)
- mfmsr r10
- wrteei 0
- slwi r4,r4,16
- mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
- tlbsx 0,r3
- mfspr r4,SPRN_MAS1 /* check valid */
- andis. r3,r4,MAS1_VALID@h
- beq 1f
- rlwinm r4,r4,0,1,31
- mtspr SPRN_MAS1,r4
- tlbwe
- msync
- isync
-1: wrtee r10
- blr
-#endif /* CONFIG_FSL_BOOKE */
-
/*
* Flush instruction cache.
@@ -650,8 +426,8 @@ _GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
- rlwinm r3,r3,0,0,19 /* Get page base address */
- li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
+ rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */
+ li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */
mtctr r4
mr r6,r3
0: dcbst 0,r3 /* Write line to ram */
@@ -691,8 +467,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
rlwinm r0,r10,0,28,26 /* clear DR */
mtmsr r0
isync
- rlwinm r3,r3,0,0,19 /* Get page base address */
- li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */
+ rlwinm r3,r3,0,0,31-PAGE_SHIFT /* Get page base address */
+ li r4,PAGE_SIZE/L1_CACHE_BYTES /* Number of lines in a page */
mtctr r4
mr r6,r3
0: dcbst 0,r3 /* Write line to ram */
@@ -716,7 +492,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
* void clear_pages(void *page, int order) ;
*/
_GLOBAL(clear_pages)
- li r0,4096/L1_CACHE_BYTES
+ li r0,PAGE_SIZE/L1_CACHE_BYTES
slw r0,r0,r4
mtctr r0
#ifdef CONFIG_8xx
@@ -774,7 +550,7 @@ _GLOBAL(copy_page)
dcbt r5,r4
li r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
- li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
+ li r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
crclr 4*cr0+eq
2:
mtctr r0
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 7ff292475269..43e7e3a7f130 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -78,6 +78,12 @@ int module_finalize(const Elf_Ehdr *hdr,
(void *)sect->sh_addr,
(void *)sect->sh_addr + sect->sh_size);
+ sect = find_section(hdr, sechdrs, "__mmu_ftr_fixup");
+ if (sect != NULL)
+ do_feature_fixups(cur_cpu_spec->mmu_features,
+ (void *)sect->sh_addr,
+ (void *)sect->sh_addr + sect->sh_size);
+
#ifdef CONFIG_PPC64
sect = find_section(hdr, sechdrs, "__fw_ftr_fixup");
if (sect != NULL)
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index f3c9cae01dd5..fa983a59c4ce 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -14,7 +14,6 @@ static void of_device_make_bus_id(struct of_device *dev)
{
static atomic_t bus_no_reg_magic;
struct device_node *node = dev->node;
- char *name = dev->dev.bus_id;
const u32 *reg;
u64 addr;
int magic;
@@ -27,14 +26,12 @@ static void of_device_make_bus_id(struct of_device *dev)
reg = of_get_property(node, "dcr-reg", NULL);
if (reg) {
#ifdef CONFIG_PPC_DCR_NATIVE
- snprintf(name, BUS_ID_SIZE, "d%x.%s",
- *reg, node->name);
+ dev_set_name(&dev->dev, "d%x.%s", *reg, node->name);
#else /* CONFIG_PPC_DCR_NATIVE */
addr = of_translate_dcr_address(node, *reg, NULL);
if (addr != OF_BAD_ADDR) {
- snprintf(name, BUS_ID_SIZE,
- "D%llx.%s", (unsigned long long)addr,
- node->name);
+ dev_set_name(&dev->dev, "D%llx.%s",
+ (unsigned long long)addr, node->name);
return;
}
#endif /* !CONFIG_PPC_DCR_NATIVE */
@@ -48,9 +45,8 @@ static void of_device_make_bus_id(struct of_device *dev)
if (reg) {
addr = of_translate_address(node, reg);
if (addr != OF_BAD_ADDR) {
- snprintf(name, BUS_ID_SIZE,
- "%llx.%s", (unsigned long long)addr,
- node->name);
+ dev_set_name(&dev->dev, "%llx.%s",
+ (unsigned long long)addr, node->name);
return;
}
}
@@ -60,7 +56,7 @@ static void of_device_make_bus_id(struct of_device *dev)
* counter (and pray...)
*/
magic = atomic_add_return(1, &bus_no_reg_magic);
- snprintf(name, BUS_ID_SIZE, "%s.%d", node->name, magic - 1);
+ dev_set_name(&dev->dev, "%s.%d", node->name, magic - 1);
}
struct of_device *of_device_alloc(struct device_node *np,
@@ -80,7 +76,7 @@ struct of_device *of_device_alloc(struct device_node *np,
dev->dev.archdata.of_node = np;
if (bus_id)
- strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);
+ dev_set_name(&dev->dev, bus_id);
else
of_device_make_bus_id(dev);
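
dev_set_name() takes the same printf-style arguments the old snprintf() into bus_id[] did, but allocates the name dynamically and can therefore fail. A short sketch reusing the addr/node variables from the function above:

	/* same format string as above; returns -ENOMEM if allocation fails */
	int err = dev_set_name(&dev->dev, "%llx.%s",
			       (unsigned long long)addr, node->name);
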
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 48a347133f41..c744b327bcab 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -37,6 +37,7 @@ struct lppaca lppaca[] = {
.end_of_quantum = 0xfffffffffffffffful,
.slb_count = 64,
.vmxregs_in_use = 0,
+ .page_ins = 0,
},
};
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index f36936d9fda3..2538030954d8 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -37,13 +37,7 @@
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
-
-#ifdef DEBUG
-#include <asm/udbg.h>
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG(fmt...)
-#endif
+#include <asm/eeh.h>
static DEFINE_SPINLOCK(hose_spinlock);
@@ -53,8 +47,9 @@ static int global_phb_number; /* Global phb counter */
/* ISA Memory physical address */
resource_size_t isa_mem_base;
-/* Default PCI flags is 0 */
-unsigned int ppc_pci_flags;
+/* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */
+unsigned int ppc_pci_flags = 0;
+
static struct dma_mapping_ops *pci_dma_ops;
@@ -165,8 +160,6 @@ EXPORT_SYMBOL(pci_domain_nr);
*/
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
{
- if (!have_of)
- return NULL;
while(node) {
struct pci_controller *hose, *tmp;
list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
@@ -208,26 +201,6 @@ char __devinit *pcibios_setup(char *str)
return str;
}
-void __devinit pcibios_setup_new_device(struct pci_dev *dev)
-{
- struct dev_archdata *sd = &dev->dev.archdata;
-
- sd->of_node = pci_device_to_OF_node(dev);
-
- DBG("PCI: device %s OF node: %s\n", pci_name(dev),
- sd->of_node ? sd->of_node->full_name : "<none>");
-
- sd->dma_ops = pci_dma_ops;
-#ifdef CONFIG_PPC32
- sd->dma_data = (void *)PCI_DRAM_OFFSET;
-#endif
- set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
-
- if (ppc_md.pci_dma_dev_setup)
- ppc_md.pci_dma_dev_setup(dev);
-}
-EXPORT_SYMBOL(pcibios_setup_new_device);
-
/*
* Reads the interrupt pin to determine if interrupt is use by card.
* If the interrupt is used, then gets the interrupt line from the
@@ -252,7 +225,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
return -1;
#endif
- DBG("Try to map irq for %s...\n", pci_name(pci_dev));
+ pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
#ifdef DEBUG
memset(&oirq, 0xff, sizeof(oirq));
@@ -276,26 +249,26 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
line == 0xff || line == 0) {
return -1;
}
- DBG(" -> no map ! Using line %d (pin %d) from PCI config\n",
- line, pin);
+ pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
+ line, pin);
virq = irq_create_mapping(NULL, line);
if (virq != NO_IRQ)
set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
} else {
- DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
- oirq.size, oirq.specifier[0], oirq.specifier[1],
+ pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
+ oirq.size, oirq.specifier[0], oirq.specifier[1],
oirq.controller->full_name);
virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
}
if(virq == NO_IRQ) {
- DBG(" -> failed to map !\n");
+ pr_debug(" Failed to map !\n");
return -1;
}
- DBG(" -> mapped to linux irq %d\n", virq);
+ pr_debug(" Mapped to linux irq %d\n", virq);
pci_dev->irq = virq;
@@ -397,13 +370,10 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
}
/* XXX would be nice to have a way to ask for write-through */
- prot |= _PAGE_NO_CACHE;
if (write_combine)
- prot &= ~_PAGE_GUARDED;
+ return pgprot_noncached_wc(prot);
else
- prot |= _PAGE_GUARDED;
-
- return __pgprot(prot);
+ return pgprot_noncached(prot);
}
/*
@@ -414,19 +384,17 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
pgprot_t pci_phys_mem_access_prot(struct file *file,
unsigned long pfn,
unsigned long size,
- pgprot_t protection)
+ pgprot_t prot)
{
struct pci_dev *pdev = NULL;
struct resource *found = NULL;
- unsigned long prot = pgprot_val(protection);
resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
int i;
if (page_is_ram(pfn))
- return __pgprot(prot);
-
- prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
+ return prot;
+ prot = pgprot_noncached(prot);
for_each_pci_dev(pdev) {
for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
struct resource *rp = &pdev->resource[i];
@@ -447,14 +415,14 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
}
if (found) {
if (found->flags & IORESOURCE_PREFETCH)
- prot &= ~_PAGE_GUARDED;
+ prot = pgprot_noncached_wc(prot);
pci_dev_put(pdev);
}
- DBG("non-PCI map for %llx, prot: %lx\n",
- (unsigned long long)offset, prot);
+ pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
+ (unsigned long long)offset, pgprot_val(prot));
- return __pgprot(prot);
+ return prot;
}
@@ -610,8 +578,7 @@ int pci_mmap_legacy_page_range(struct pci_bus *bus,
pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);
vma->vm_pgoff = offset >> PAGE_SHIFT;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
@@ -853,15 +820,12 @@ void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
int pci_proc_domain(struct pci_bus *bus)
{
struct pci_controller *hose = pci_bus_to_host(bus);
-#ifdef CONFIG_PPC64
- return hose->buid != 0;
-#else
+
if (!(ppc_pci_flags & PPC_PCI_ENABLE_PROC_DOMAINS))
return 0;
if (ppc_pci_flags & PPC_PCI_COMPAT_DOMAIN_0)
return hose->global_number != 0;
return 1;
-#endif
}
void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
@@ -1083,27 +1047,50 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
}
}
-static void __devinit __pcibios_fixup_bus(struct pci_bus *bus)
+void __devinit pcibios_setup_bus_self(struct pci_bus *bus)
{
- struct pci_dev *dev = bus->self;
-
- pr_debug("PCI: Fixup bus %d (%s)\n", bus->number, dev ? pci_name(dev) : "PHB");
-
- /* Fixup PCI<->PCI bridges. Host bridges are handled separately, for
- * now differently between 32 and 64 bits.
- */
- if (dev != NULL)
+ /* Fix up the bus resources for P2P bridges */
+ if (bus->self != NULL)
pcibios_fixup_bridge(bus);
- /* Additional setup that is different between 32 and 64 bits for now */
- pcibios_do_bus_setup(bus);
-
- /* Platform specific bus fixups */
+ /* Platform specific bus fixups. This is currently only used
+ * by fsl_pci and I'm hoping to get rid of it at some point
+ */
if (ppc_md.pcibios_fixup_bus)
ppc_md.pcibios_fixup_bus(bus);
- /* Read default IRQs and fixup if necessary */
+ /* Setup bus DMA mappings */
+ if (ppc_md.pci_dma_bus_setup)
+ ppc_md.pci_dma_bus_setup(bus);
+}
+
+void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ pr_debug("PCI: Fixup bus devices %d (%s)\n",
+ bus->number, bus->self ? pci_name(bus->self) : "PHB");
+
list_for_each_entry(dev, &bus->devices, bus_list) {
+ struct dev_archdata *sd = &dev->dev.archdata;
+
+ /* Setup OF node pointer in archdata */
+ sd->of_node = pci_device_to_OF_node(dev);
+
+ /* Fixup NUMA node as it may not be setup yet by the generic
+ * code and is needed by the DMA init
+ */
+ set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
+
+ /* Hook up default DMA ops */
+ sd->dma_ops = pci_dma_ops;
+ sd->dma_data = (void *)PCI_DRAM_OFFSET;
+
+ /* Additional platform DMA/iommu setup */
+ if (ppc_md.pci_dma_dev_setup)
+ ppc_md.pci_dma_dev_setup(dev);
+
+ /* Read default IRQs and fixup if necessary */
pci_read_irq_line(dev);
if (ppc_md.pci_irq_fixup)
ppc_md.pci_irq_fixup(dev);
@@ -1113,22 +1100,19 @@ static void __devinit __pcibios_fixup_bus(struct pci_bus *bus)
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
/* When called from the generic PCI probe, read PCI<->PCI bridge
- * bases before proceeding
+ * bases. This is -not- called when generating the PCI tree from
+ * the OF device-tree.
*/
if (bus->self != NULL)
pci_read_bridge_bases(bus);
- __pcibios_fixup_bus(bus);
-}
-EXPORT_SYMBOL(pcibios_fixup_bus);
-/* When building a bus from the OF tree rather than probing, we need a
- * slightly different version of the fixup which doesn't read the
- * bridge bases using config space accesses
- */
-void __devinit pcibios_fixup_of_probed_bus(struct pci_bus *bus)
-{
- __pcibios_fixup_bus(bus);
+	/* Now fixup the bus itself */
+ pcibios_setup_bus_self(bus);
+
+ /* Now fixup devices on that bus */
+ pcibios_setup_bus_devices(bus);
}
+EXPORT_SYMBOL(pcibios_fixup_bus);
static int skip_isa_ioresource_align(struct pci_dev *dev)
{
@@ -1198,10 +1182,10 @@ static int __init reparent_resources(struct resource *parent,
*pp = NULL;
for (p = res->child; p != NULL; p = p->sibling) {
p->parent = res;
- DBG(KERN_INFO "PCI: reparented %s [%llx..%llx] under %s\n",
- p->name,
- (unsigned long long)p->start,
- (unsigned long long)p->end, res->name);
+ pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
+ p->name,
+ (unsigned long long)p->start,
+ (unsigned long long)p->end, res->name);
}
return 0;
}
@@ -1245,9 +1229,12 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
int i;
struct resource *res, *pr;
+ pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
+ pci_domain_nr(bus), bus->number);
+
for (i = 0; i < PCI_BUS_NUM_RESOURCES; ++i) {
if ((res = bus->resource[i]) == NULL || !res->flags
- || res->start > res->end)
+ || res->start > res->end || res->parent)
continue;
if (bus->parent == NULL)
pr = (res->flags & IORESOURCE_IO) ?
@@ -1271,14 +1258,14 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
}
}
- DBG("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
- "[0x%x], parent %p (%s)\n",
- bus->self ? pci_name(bus->self) : "PHB",
- bus->number, i,
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- (unsigned int)res->flags,
- pr, (pr && pr->name) ? pr->name : "nil");
+ pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
+ "[0x%x], parent %p (%s)\n",
+ bus->self ? pci_name(bus->self) : "PHB",
+ bus->number, i,
+ (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ (unsigned int)res->flags,
+ pr, (pr && pr->name) ? pr->name : "nil");
if (pr && !(pr->flags & IORESOURCE_UNSET)) {
if (request_resource(pr, res) == 0)
@@ -1305,11 +1292,11 @@ static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
{
struct resource *pr, *r = &dev->resource[idx];
- DBG("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
- pci_name(dev), idx,
- (unsigned long long)r->start,
- (unsigned long long)r->end,
- (unsigned int)r->flags);
+ pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
+ pci_name(dev), idx,
+ (unsigned long long)r->start,
+ (unsigned long long)r->end,
+ (unsigned int)r->flags);
pr = pci_find_parent_resource(dev, r);
if (!pr || (pr->flags & IORESOURCE_UNSET) ||
@@ -1317,10 +1304,11 @@ static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
" of device %s, will remap\n", idx, pci_name(dev));
if (pr)
- DBG("PCI: parent is %p: %016llx-%016llx [%x]\n", pr,
- (unsigned long long)pr->start,
- (unsigned long long)pr->end,
- (unsigned int)pr->flags);
+ pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n",
+ pr,
+ (unsigned long long)pr->start,
+ (unsigned long long)pr->end,
+ (unsigned int)pr->flags);
/* We'll assign a new address later */
r->flags |= IORESOURCE_UNSET;
r->end -= r->start;
@@ -1358,7 +1346,8 @@ static void __init pcibios_allocate_resources(int pass)
* but keep it unregistered.
*/
u32 reg;
- DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
+ pr_debug("PCI: Switching off ROM of %s\n",
+ pci_name(dev));
r->flags &= ~IORESOURCE_ROM_ENABLE;
pci_read_config_dword(dev, dev->rom_base_reg, &reg);
pci_write_config_dword(dev, dev->rom_base_reg,
@@ -1383,7 +1372,7 @@ void __init pcibios_resource_survey(void)
}
if (!(ppc_pci_flags & PPC_PCI_PROBE_ONLY)) {
- DBG("PCI: Assigning unassigned resouces...\n");
+		pr_debug("PCI: Assigning unassigned resources...\n");
pci_assign_unassigned_resources();
}
@@ -1393,9 +1382,11 @@ void __init pcibios_resource_survey(void)
}
#ifdef CONFIG_HOTPLUG
-/* This is used by the pSeries hotplug driver to allocate resource
+
+/* This is used by the PCI hotplug driver to allocate resources
* of newly plugged busses. We can try to consolidate with the
- * rest of the code later, for now, keep it as-is
+ * rest of the code later, for now, keep it as-is as our main
+ * resource allocation function doesn't deal with sub-trees yet.
*/
void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
{
@@ -1410,6 +1401,14 @@ void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
if (r->parent || !r->start || !r->flags)
continue;
+
+ pr_debug("PCI: Claiming %s: "
+ "Resource %d: %016llx..%016llx [%x]\n",
+ pci_name(dev), i,
+ (unsigned long long)r->start,
+ (unsigned long long)r->end,
+ (unsigned int)r->flags);
+
pci_claim_resource(dev, i);
}
}
@@ -1418,6 +1417,31 @@ void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
pcibios_claim_one_bus(child_bus);
}
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
+
+
+/* pcibios_finish_adding_to_bus
+ *
+ * This is to be called by the hotplug code after devices have been
+ * added to a bus; this includes calling it for a PHB that is just
+ * being added
+ */
+void pcibios_finish_adding_to_bus(struct pci_bus *bus)
+{
+ pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
+ pci_domain_nr(bus), bus->number);
+
+ /* Allocate bus and devices resources */
+ pcibios_allocate_bus_resources(bus);
+ pcibios_claim_one_bus(bus);
+
+ /* Add new devices to global lists. Register in proc, sysfs. */
+ pci_bus_add_devices(bus);
+
+ /* Fixup EEH */
+ eeh_add_device_tree_late(bus);
+}
+EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
+
#endif /* CONFIG_HOTPLUG */
int pcibios_enable_device(struct pci_dev *dev, int mask)
@@ -1428,3 +1452,61 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
return pci_enable_resources(dev, mask);
}
+
+void __devinit pcibios_setup_phb_resources(struct pci_controller *hose)
+{
+ struct pci_bus *bus = hose->bus;
+ struct resource *res;
+ int i;
+
+ /* Hookup PHB IO resource */
+ bus->resource[0] = res = &hose->io_resource;
+
+ if (!res->flags) {
+ printk(KERN_WARNING "PCI: I/O resource not set for host"
+ " bridge %s (domain %d)\n",
+ hose->dn->full_name, hose->global_number);
+#ifdef CONFIG_PPC32
+ /* Workaround for lack of IO resource only on 32-bit */
+ res->start = (unsigned long)hose->io_base_virt - isa_io_base;
+ res->end = res->start + IO_SPACE_LIMIT;
+ res->flags = IORESOURCE_IO;
+#endif /* CONFIG_PPC32 */
+ }
+
+ pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ (unsigned long)res->flags);
+
+ /* Hookup PHB Memory resources */
+ for (i = 0; i < 3; ++i) {
+ res = &hose->mem_resources[i];
+ if (!res->flags) {
+ if (i > 0)
+ continue;
+ printk(KERN_ERR "PCI: Memory resource 0 not set for "
+ "host bridge %s (domain %d)\n",
+ hose->dn->full_name, hose->global_number);
+#ifdef CONFIG_PPC32
+ /* Workaround for lack of MEM resource only on 32-bit */
+ res->start = hose->pci_mem_offset;
+ res->end = (resource_size_t)-1LL;
+ res->flags = IORESOURCE_MEM;
+#endif /* CONFIG_PPC32 */
+ }
+ bus->resource[i+1] = res;
+
+ pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", i,
+ (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ (unsigned long)res->flags);
+ }
+
+ pr_debug("PCI: PHB MEM offset = %016llx\n",
+ (unsigned long long)hose->pci_mem_offset);
+ pr_debug("PCI: PHB IO offset = %08lx\n",
+ (unsigned long)hose->io_base_virt - _IO_BASE);
+
+}
+
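
pcibios_finish_adding_to_bus() packages the post-scan steps (resource allocation and claiming, device registration, EEH fixup) a hotplug path needs once it has populated a bus. A hedged sketch of the intended calling order from a hotplug driver (the scan call around it is illustrative):

	pci_scan_child_bus(bus);		/* discover the hot-added devices */
	pcibios_finish_adding_to_bus(bus);	/* resources, claim, sysfs, EEH */
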
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 131b1dfa68c6..132cd80afa21 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -26,12 +26,6 @@
#undef DEBUG
-#ifdef DEBUG
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
-
unsigned long isa_io_base = 0;
unsigned long pci_dram_offset = 0;
int pcibios_assign_bus_offset = 1;
@@ -272,17 +266,14 @@ pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
{
struct device_node *parent, *np;
- if (!have_of)
- return NULL;
-
- DBG("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
+ pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
parent = scan_OF_for_pci_bus(bus);
if (parent == NULL)
return NULL;
- DBG(" parent is %s\n", parent ? parent->full_name : "<NULL>");
+ pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
np = scan_OF_for_pci_dev(parent, devfn);
of_node_put(parent);
- DBG(" result is %s\n", np ? np->full_name : "<NULL>");
+ pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
/* XXX most callers don't release the returned node
* mostly because ppc64 doesn't increase the refcount,
@@ -315,8 +306,6 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
struct pci_controller* hose;
struct pci_dev* dev = NULL;
- if (!have_of)
- return -ENODEV;
/* Make sure it's really a PCI device */
hose = pci_find_hose_for_OF_device(node);
if (!hose || !hose->dn)
@@ -379,10 +368,41 @@ void pcibios_make_OF_bus_map(void)
}
#endif /* CONFIG_PPC_OF */
+static void __devinit pcibios_scan_phb(struct pci_controller *hose)
+{
+ struct pci_bus *bus;
+ struct device_node *node = hose->dn;
+ unsigned long io_offset;
+ struct resource *res = &hose->io_resource;
+
+ pr_debug("PCI: Scanning PHB %s\n",
+ node ? node->full_name : "<NO NAME>");
+
+ /* Create an empty bus for the toplevel */
+ bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
+ if (bus == NULL) {
+ printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
+ hose->global_number);
+ return;
+ }
+ bus->secondary = hose->first_busno;
+ hose->bus = bus;
+
+ /* Fixup IO space offset */
+ io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
+ res->start = (res->start + io_offset) & 0xffffffffu;
+ res->end = (res->end + io_offset) & 0xffffffffu;
+
+ /* Wire up PHB bus resources */
+ pcibios_setup_phb_resources(hose);
+
+ /* Scan children */
+ hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
+}
+
static int __init pcibios_init(void)
{
struct pci_controller *hose, *tmp;
- struct pci_bus *bus;
int next_busno = 0;
printk(KERN_INFO "PCI: Probing PCI hardware\n");
@@ -395,12 +415,8 @@ static int __init pcibios_init(void)
if (pci_assign_all_buses)
hose->first_busno = next_busno;
hose->last_busno = 0xff;
- bus = pci_scan_bus_parented(hose->parent, hose->first_busno,
- hose->ops, hose);
- if (bus) {
- pci_bus_add_devices(bus);
- hose->last_busno = bus->subordinate;
- }
+ pcibios_scan_phb(hose);
+ pci_bus_add_devices(hose->bus);
if (pci_assign_all_buses || next_busno <= hose->last_busno)
next_busno = hose->last_busno + pcibios_assign_bus_offset;
}
@@ -410,7 +426,7 @@ static int __init pcibios_init(void)
* numbers vs. kernel bus numbers since we may have to
* remap them.
*/
- if (pci_assign_all_buses && have_of)
+ if (pci_assign_all_buses)
pcibios_make_OF_bus_map();
/* Call common code to handle resource allocation */
@@ -425,54 +441,6 @@ static int __init pcibios_init(void)
subsys_initcall(pcibios_init);
-void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
-{
- struct pci_controller *hose = (struct pci_controller *) bus->sysdata;
- unsigned long io_offset;
- struct resource *res;
- int i;
- struct pci_dev *dev;
-
- /* Hookup PHB resources */
- io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
- if (bus->parent == NULL) {
- /* This is a host bridge - fill in its resources */
- hose->bus = bus;
-
- bus->resource[0] = res = &hose->io_resource;
- if (!res->flags) {
- if (io_offset)
- printk(KERN_ERR "I/O resource not set for host"
- " bridge %d\n", hose->global_number);
- res->start = 0;
- res->end = IO_SPACE_LIMIT;
- res->flags = IORESOURCE_IO;
- }
- res->start = (res->start + io_offset) & 0xffffffffu;
- res->end = (res->end + io_offset) & 0xffffffffu;
-
- for (i = 0; i < 3; ++i) {
- res = &hose->mem_resources[i];
- if (!res->flags) {
- if (i > 0)
- continue;
- printk(KERN_ERR "Memory resource not set for "
- "host bridge %d\n", hose->global_number);
- res->start = hose->pci_mem_offset;
- res->end = ~0U;
- res->flags = IORESOURCE_MEM;
- }
- bus->resource[i+1] = res;
- }
- }
-
- if (ppc_md.pci_dma_bus_setup)
- ppc_md.pci_dma_bus_setup(bus);
-
- list_for_each_entry(dev, &bus->devices, bus_list)
- pcibios_setup_new_device(dev);
-}
-
/* the next one is stolen from the alpha port... */
void __init
pcibios_update_irq(struct pci_dev *dev, int irq)
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 3502b9101e6b..39fadc6e1492 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -32,13 +32,6 @@
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
-#ifdef DEBUG
-#include <asm/udbg.h>
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
unsigned long pci_probe_only = 1;
/* pci_io_base -- the base address from which io bars are offsets.
@@ -102,7 +95,7 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
addrs = of_get_property(node, "assigned-addresses", &proplen);
if (!addrs)
return;
- DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
+ pr_debug(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
for (; proplen >= 20; proplen -= 20, addrs += 5) {
flags = pci_parse_of_flags(addrs[0]);
if (!flags)
@@ -112,8 +105,9 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev)
if (!size)
continue;
i = addrs[0] & 0xff;
- DBG(" base: %llx, size: %llx, i: %x\n",
- (unsigned long long)base, (unsigned long long)size, i);
+ pr_debug(" base: %llx, size: %llx, i: %x\n",
+ (unsigned long long)base,
+ (unsigned long long)size, i);
if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
@@ -144,7 +138,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
if (type == NULL)
type = "";
- DBG(" create device, devfn: %x, type: %s\n", devfn, type);
+ pr_debug(" create device, devfn: %x, type: %s\n", devfn, type);
dev->bus = bus;
dev->sysdata = node;
@@ -165,8 +159,8 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
dev->class = get_int_prop(node, "class-code", 0);
dev->revision = get_int_prop(node, "revision-id", 0);
- DBG(" class: 0x%x\n", dev->class);
- DBG(" revision: 0x%x\n", dev->revision);
+ pr_debug(" class: 0x%x\n", dev->class);
+ pr_debug(" revision: 0x%x\n", dev->revision);
dev->current_state = 4; /* unknown power state */
dev->error_state = pci_channel_io_normal;
@@ -187,7 +181,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
pci_parse_of_addrs(node, dev);
- DBG(" adding to system ...\n");
+ pr_debug(" adding to system ...\n");
pci_device_add(dev, bus);
@@ -195,19 +189,20 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
}
EXPORT_SYMBOL(of_create_pci_dev);
-void __devinit of_scan_bus(struct device_node *node,
- struct pci_bus *bus)
+static void __devinit __of_scan_bus(struct device_node *node,
+ struct pci_bus *bus, int rescan_existing)
{
struct device_node *child;
const u32 *reg;
int reglen, devfn;
struct pci_dev *dev;
- DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number);
+ pr_debug("of_scan_bus(%s) bus no %d... \n",
+ node->full_name, bus->number);
/* Scan direct children */
for_each_child_of_node(node, child) {
- DBG(" * %s\n", child->full_name);
+ pr_debug(" * %s\n", child->full_name);
reg = of_get_property(child, "reg", &reglen);
if (reg == NULL || reglen < 20)
continue;
@@ -217,11 +212,15 @@ void __devinit of_scan_bus(struct device_node *node,
dev = of_create_pci_dev(child, bus, devfn);
if (!dev)
continue;
- DBG(" dev header type: %x\n", dev->hdr_type);
+ pr_debug(" dev header type: %x\n", dev->hdr_type);
}
- /* Ally all fixups */
- pcibios_fixup_of_probed_bus(bus);
+ /* Apply all fixups necessary. We don't fixup the bus "self"
+ * for an existing bridge that is being rescanned
+ */
+ if (!rescan_existing)
+ pcibios_setup_bus_self(bus);
+ pcibios_setup_bus_devices(bus);
/* Now scan child busses */
list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -233,7 +232,20 @@ void __devinit of_scan_bus(struct device_node *node,
}
}
}
-EXPORT_SYMBOL(of_scan_bus);
+
+void __devinit of_scan_bus(struct device_node *node,
+ struct pci_bus *bus)
+{
+ __of_scan_bus(node, bus, 0);
+}
+EXPORT_SYMBOL_GPL(of_scan_bus);
+
+void __devinit of_rescan_bus(struct device_node *node,
+ struct pci_bus *bus)
+{
+ __of_scan_bus(node, bus, 1);
+}
+EXPORT_SYMBOL_GPL(of_rescan_bus);
void __devinit of_scan_pci_bridge(struct device_node *node,
struct pci_dev *dev)
@@ -245,7 +257,7 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
unsigned int flags;
u64 size;
- DBG("of_scan_pci_bridge(%s)\n", node->full_name);
+ pr_debug("of_scan_pci_bridge(%s)\n", node->full_name);
/* parse bus-range property */
busrange = of_get_property(node, "bus-range", &len);
@@ -309,12 +321,12 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
}
sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
bus->number);
- DBG(" bus name: %s\n", bus->name);
+ pr_debug(" bus name: %s\n", bus->name);
mode = PCI_PROBE_NORMAL;
if (ppc_md.pci_probe_mode)
mode = ppc_md.pci_probe_mode(bus);
- DBG(" probe mode: %d\n", mode);
+ pr_debug(" probe mode: %d\n", mode);
if (mode == PCI_PROBE_DEVTREE)
of_scan_bus(node, bus);
@@ -327,9 +339,10 @@ void __devinit scan_phb(struct pci_controller *hose)
{
struct pci_bus *bus;
struct device_node *node = hose->dn;
- int i, mode;
+ int mode;
- DBG("PCI: Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");
+ pr_debug("PCI: Scanning PHB %s\n",
+ node ? node->full_name : "<NO NAME>");
/* Create an empty bus for the toplevel */
bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
@@ -345,26 +358,13 @@ void __devinit scan_phb(struct pci_controller *hose)
pcibios_map_io_space(bus);
/* Wire up PHB bus resources */
- DBG("PCI: PHB IO resource = %016lx-%016lx [%lx]\n",
- hose->io_resource.start, hose->io_resource.end,
- hose->io_resource.flags);
- bus->resource[0] = &hose->io_resource;
- for (i = 0; i < 3; ++i) {
- DBG("PCI: PHB MEM resource %d = %016lx-%016lx [%lx]\n", i,
- hose->mem_resources[i].start,
- hose->mem_resources[i].end,
- hose->mem_resources[i].flags);
- bus->resource[i+1] = &hose->mem_resources[i];
- }
- DBG("PCI: PHB MEM offset = %016lx\n", hose->pci_mem_offset);
- DBG("PCI: PHB IO offset = %08lx\n",
- (unsigned long)hose->io_base_virt - _IO_BASE);
+ pcibios_setup_phb_resources(hose);
/* Get probe mode and perform scan */
mode = PCI_PROBE_NORMAL;
if (node && ppc_md.pci_probe_mode)
mode = ppc_md.pci_probe_mode(bus);
- DBG(" probe mode: %d\n", mode);
+ pr_debug(" probe mode: %d\n", mode);
if (mode == PCI_PROBE_DEVTREE) {
bus->subordinate = hose->last_busno;
of_scan_bus(node, bus);
@@ -380,7 +380,7 @@ static int __init pcibios_init(void)
printk(KERN_INFO "PCI: Probing PCI hardware\n");
- /* For now, override phys_mem_access_prot. If we need it,
+ /* For now, override phys_mem_access_prot. If we need it,
* later, we may move that initialization to each ppc_md
*/
ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
@@ -388,6 +388,11 @@ static int __init pcibios_init(void)
if (pci_probe_only)
ppc_pci_flags |= PPC_PCI_PROBE_ONLY;
+ /* On ppc64, we always enable PCI domains and we keep domain 0
+ * backward compatible in /proc for video cards
+ */
+ ppc_pci_flags |= PPC_PCI_ENABLE_PROC_DOMAINS | PPC_PCI_COMPAT_DOMAIN_0;
+
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
scan_phb(hose);
@@ -422,8 +427,8 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
if (bus->self) {
struct resource *res = bus->resource[0];
- DBG("IO unmapping for PCI-PCI bridge %s\n",
- pci_name(bus->self));
+ pr_debug("IO unmapping for PCI-PCI bridge %s\n",
+ pci_name(bus->self));
__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
res->end + _IO_BASE + 1);
@@ -437,8 +442,8 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
if (hose->io_base_alloc == 0)
return 0;
- DBG("IO unmapping for PHB %s\n", hose->dn->full_name);
- DBG(" alloc=0x%p\n", hose->io_base_alloc);
+ pr_debug("IO unmapping for PHB %s\n", hose->dn->full_name);
+ pr_debug(" alloc=0x%p\n", hose->io_base_alloc);
/* This is a PHB, we fully unmap the IO area */
vunmap(hose->io_base_alloc);
@@ -463,11 +468,11 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
* thus HPTEs will be faulted in when needed
*/
if (bus->self) {
- DBG("IO mapping for PCI-PCI bridge %s\n",
- pci_name(bus->self));
- DBG(" virt=0x%016lx...0x%016lx\n",
- bus->resource[0]->start + _IO_BASE,
- bus->resource[0]->end + _IO_BASE);
+ pr_debug("IO mapping for PCI-PCI bridge %s\n",
+ pci_name(bus->self));
+ pr_debug(" virt=0x%016lx...0x%016lx\n",
+ bus->resource[0]->start + _IO_BASE,
+ bus->resource[0]->end + _IO_BASE);
return 0;
}
@@ -496,11 +501,11 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
hose->io_base_virt = (void __iomem *)(area->addr +
hose->io_base_phys - phys_page);
- DBG("IO mapping for PHB %s\n", hose->dn->full_name);
- DBG(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
- hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
- DBG(" size=0x%016lx (alloc=0x%016lx)\n",
- hose->pci_io_size, size_page);
+ pr_debug("IO mapping for PHB %s\n", hose->dn->full_name);
+ pr_debug(" phys=0x%016lx, virt=0x%p (alloc=0x%p)\n",
+ hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
+ pr_debug(" size=0x%016lx (alloc=0x%016lx)\n",
+ hose->pci_io_size, size_page);
/* Establish the mapping */
if (__ioremap_at(phys_page, area->addr, size_page,
@@ -512,24 +517,13 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
hose->io_resource.start += io_virt_offset;
hose->io_resource.end += io_virt_offset;
- DBG(" hose->io_resource=0x%016lx...0x%016lx\n",
- hose->io_resource.start, hose->io_resource.end);
+ pr_debug(" hose->io_resource=0x%016lx...0x%016lx\n",
+ hose->io_resource.start, hose->io_resource.end);
return 0;
}
EXPORT_SYMBOL_GPL(pcibios_map_io_space);
-void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- if (ppc_md.pci_dma_bus_setup)
- ppc_md.pci_dma_bus_setup(bus);
-
- list_for_each_entry(dev, &bus->devices, bus_list)
- pcibios_setup_new_device(dev);
-}
-
unsigned long pci_address_to_pio(phys_addr_t address)
{
struct pci_controller *hose, *tmp;
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 260089dccfb0..dcec1325d340 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -116,12 +116,6 @@ EXPORT_SYMBOL(giveup_spe);
#ifndef CONFIG_PPC64
EXPORT_SYMBOL(flush_instruction_cache);
-EXPORT_SYMBOL(flush_tlb_kernel_range);
-EXPORT_SYMBOL(flush_tlb_page);
-EXPORT_SYMBOL(_tlbie);
-#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
-EXPORT_SYMBOL(_tlbil_va);
-#endif
#endif
EXPORT_SYMBOL(__flush_icache_range);
EXPORT_SYMBOL(flush_dcache_range);
@@ -174,8 +168,7 @@ EXPORT_SYMBOL(cacheable_memcpy);
#endif
#ifdef CONFIG_PPC32
-EXPORT_SYMBOL(next_mmu_context);
-EXPORT_SYMBOL(set_context);
+EXPORT_SYMBOL(switch_mmu_context);
#endif
#ifdef CONFIG_PPC_STD_MMU_32
diff --git a/arch/powerpc/xmon/setjmp.S b/arch/powerpc/kernel/ppc_save_regs.S
index 04c0b305ad4a..5113bd2285e1 100644
--- a/arch/powerpc/xmon/setjmp.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -22,7 +22,7 @@
* that will be different for 32-bit and 64-bit, because of the
* different ABIs, though).
*/
-_GLOBAL(xmon_save_regs)
+_GLOBAL(ppc_save_regs)
PPC_STL r0,0*SZL(r3)
PPC_STL r2,2*SZL(r3)
PPC_STL r3,3*SZL(r3)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 957bded0020d..51b201ddf9a1 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -467,6 +467,8 @@ static struct regbit {
{MSR_VEC, "VEC"},
{MSR_VSX, "VSX"},
{MSR_ME, "ME"},
+ {MSR_CE, "CE"},
+ {MSR_DE, "DE"},
{MSR_IR, "IR"},
{MSR_DR, "DR"},
{0, NULL}
@@ -998,7 +1000,7 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
}
-static int kstack_depth_to_print = 64;
+static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 3a2dc7e6586a..6f73c739f1e2 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -1160,6 +1160,8 @@ static inline void __init phyp_dump_reserve_mem(void) {}
void __init early_init_devtree(void *params)
{
+ unsigned long limit;
+
DBG(" -> early_init_devtree(%p)\n", params);
/* Setup flat device-tree pointer */
@@ -1200,7 +1202,19 @@ void __init early_init_devtree(void *params)
early_reserve_mem();
phyp_dump_reserve_mem();
- lmb_enforce_memory_limit(memory_limit);
+ limit = memory_limit;
+ if (! limit) {
+ unsigned long memsize;
+
+ /* Ensure that total memory size is page-aligned, because
+ * otherwise mark_bootmem() gets upset. */
+ lmb_analyze();
+ memsize = lmb_phys_mem_size();
+ if ((memsize & PAGE_MASK) != memsize)
+ limit = memsize & PAGE_MASK;
+ }
+ lmb_enforce_memory_limit(limit);
+
lmb_analyze();
DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
@@ -1271,6 +1285,37 @@ struct device_node *of_find_node_by_phandle(phandle handle)
EXPORT_SYMBOL(of_find_node_by_phandle);
/**
+ * of_find_next_cache_node - Find a node's subsidiary cache
+ * @np: node of type "cpu" or "cache"
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done. Caller should hold a reference
+ * to np.
+ */
+struct device_node *of_find_next_cache_node(struct device_node *np)
+{
+ struct device_node *child;
+ const phandle *handle;
+
+ handle = of_get_property(np, "l2-cache", NULL);
+ if (!handle)
+ handle = of_get_property(np, "next-level-cache", NULL);
+
+ if (handle)
+ return of_find_node_by_phandle(*handle);
+
+ /* OF on pmac has nodes instead of properties named "l2-cache"
+ * beneath CPU nodes.
+ */
+ if (!strcmp(np->type, "cpu"))
+ for_each_child_of_node(np, child)
+ if (!strcmp(child->type, "cache"))
+ return child;
+
+ return NULL;
+}
+
+/**
* of_find_all_nodes - Get next node in global list
* @prev: Previous node or NULL to start iteration
* of_node_put() will be called on it
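For context, a minimal usage sketch of the helper added above (illustrative only, not part of the patch; print_cache_levels is a made-up name, while of_find_next_cache_node and of_node_put are the real interfaces):

/* Walk every cache level below a CPU node using the new helper.
 * The caller is assumed to hold a reference on @cpu, as required by
 * the helper's kerneldoc above.
 */
static void print_cache_levels(struct device_node *cpu)
{
	struct device_node *cache = of_find_next_cache_node(cpu);

	while (cache) {
		struct device_node *next = of_find_next_cache_node(cache);

		printk(KERN_INFO "cache node: %s\n", cache->full_name);
		of_node_put(cache);	/* drop the reference the helper took */
		cache = next;
	}
}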
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index a11d68976dc8..8c1335566089 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -734,10 +734,7 @@ void of_irq_map_init(unsigned int flags)
if (flags & OF_IMAP_NO_PHANDLE) {
struct device_node *np;
- for(np = NULL; (np = of_find_all_nodes(np)) != NULL;) {
- if (of_get_property(np, "interrupt-controller", NULL)
- == NULL)
- continue;
+ for_each_node_with_property(np, "interrupt-controller") {
/* Skip /chosen/interrupt-controller */
if (strcmp(np->name, "chosen") == 0)
continue;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 1f8505c23548..fdfe14c4bdef 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -566,6 +566,32 @@ int rtas_get_sensor(int sensor, int index, int *state)
}
EXPORT_SYMBOL(rtas_get_sensor);
+bool rtas_indicator_present(int token, int *maxindex)
+{
+ int proplen, count, i;
+ const struct indicator_elem {
+ u32 token;
+ u32 maxindex;
+ } *indicators;
+
+ indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
+ if (!indicators)
+ return false;
+
+ count = proplen / sizeof(struct indicator_elem);
+
+ for (i = 0; i < count; i++) {
+ if (indicators[i].token != token)
+ continue;
+ if (maxindex)
+ *maxindex = indicators[i].maxindex;
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(rtas_indicator_present);
+
int rtas_set_indicator(int indicator, int index, int new_value)
{
int token = rtas_token("set-indicator");
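A hedged caller sketch for the new rtas_indicator_present() (illustrative; set_indicator_if_present is a made-up helper, and real indicator tokens come from the PAPR indicator definitions rather than this patch):

static int set_indicator_if_present(int token, int index, int value)
{
	int maxindex = 0;

	/* Only drive indicators that firmware actually advertises in
	 * the "rtas-indicators" property. */
	if (!rtas_indicator_present(token, &maxindex))
		return -ENODEV;
	if (index > maxindex)
		return -EINVAL;

	return rtas_set_indicator(token, index, value);
}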
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 589a2797eac2..8869001ab5d7 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -301,51 +301,3 @@ void __init find_and_init_phbs(void)
#endif /* CONFIG_PPC32 */
}
}
-
-/* RPA-specific bits for removing PHBs */
-int pcibios_remove_root_bus(struct pci_controller *phb)
-{
- struct pci_bus *b = phb->bus;
- struct resource *res;
- int rc, i;
-
- res = b->resource[0];
- if (!res->flags) {
- printk(KERN_ERR "%s: no IO resource for PHB %s\n", __func__,
- b->name);
- return 1;
- }
-
- rc = pcibios_unmap_io_space(b);
- if (rc) {
- printk(KERN_ERR "%s: failed to unmap IO on bus %s\n",
- __func__, b->name);
- return 1;
- }
-
- if (release_resource(res)) {
- printk(KERN_ERR "%s: failed to release IO on bus %s\n",
- __func__, b->name);
- return 1;
- }
-
- for (i = 1; i < 3; ++i) {
- res = b->resource[i];
- if (!res->flags && i == 0) {
- printk(KERN_ERR "%s: no MEM resource for PHB %s\n",
- __func__, b->name);
- return 1;
- }
- if (res->flags && release_resource(res)) {
- printk(KERN_ERR
- "%s: failed to release IO %d on bus %s\n",
- __func__, i, b->name);
- return 1;
- }
- }
-
- pcibios_free_controller(phb);
-
- return 0;
-}
-EXPORT_SYMBOL(pcibios_remove_root_bus);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index c1a27626a940..9e1ca745d8f0 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -38,6 +38,7 @@
#include <asm/time.h>
#include <asm/serial.h>
#include <asm/udbg.h>
+#include <asm/mmu_context.h>
#include "setup.h"
@@ -49,12 +50,12 @@ int boot_cpuid;
EXPORT_SYMBOL_GPL(boot_cpuid);
int boot_cpuid_phys;
+int smp_hw_index[NR_CPUS];
+
unsigned long ISA_DMA_THRESHOLD;
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;
-int have_of = 1;
-
#ifdef CONFIG_VGA_CONSOLE
unsigned long vgacon_remap_base;
EXPORT_SYMBOL(vgacon_remap_base);
@@ -97,6 +98,10 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
PTRRELOC(&__start___ftr_fixup),
PTRRELOC(&__stop___ftr_fixup));
+ do_feature_fixups(spec->mmu_features,
+ PTRRELOC(&__start___mmu_ftr_fixup),
+ PTRRELOC(&__stop___mmu_ftr_fixup));
+
do_lwsync_fixups(spec->cpu_features,
PTRRELOC(&__start___lwsync_fixup),
PTRRELOC(&__stop___lwsync_fixup));
@@ -121,6 +126,8 @@ notrace void __init machine_init(unsigned long dt_ptr)
probe_machine();
+ setup_kdump_trampoline();
+
#ifdef CONFIG_6xx
if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
cpu_has_feature(CPU_FTR_CAN_NAP))
@@ -326,4 +333,8 @@ void __init setup_arch(char **cmdline_p)
if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
paging_init();
+
+ /* Initialize the MMU context management stuff */
+ mmu_context_init();
+
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 169d74cef157..d8bd2161e738 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -70,7 +70,6 @@
#define DBG(fmt...)
#endif
-int have_of = 1;
int boot_cpuid = 0;
u64 ppc64_pft_size;
@@ -362,6 +361,8 @@ void __init setup_system(void)
*/
do_feature_fixups(cur_cpu_spec->cpu_features,
&__start___ftr_fixup, &__stop___ftr_fixup);
+ do_feature_fixups(cur_cpu_spec->mmu_features,
+ &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup);
do_feature_fixups(powerpc_firmware_features,
&__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
do_lwsync_fixups(cur_cpu_spec->cpu_features,
@@ -606,8 +607,6 @@ void __init setup_per_cpu_areas(void)
for_each_possible_cpu(i) {
ptr = alloc_bootmem_pages_node(NODE_DATA(cpu_to_node(i)), size);
- if (!ptr)
- panic("Cannot allocate cpu data for CPU %d\n", i);
paca[i].data_offset = ptr - __per_cpu_start;
memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
diff --git a/arch/powerpc/kernel/smp-tbsync.c b/arch/powerpc/kernel/smp-tbsync.c
index bc892e69b4f7..a5e54526403d 100644
--- a/arch/powerpc/kernel/smp-tbsync.c
+++ b/arch/powerpc/kernel/smp-tbsync.c
@@ -113,7 +113,7 @@ void __devinit smp_generic_give_timebase(void)
{
int i, score, score2, old, min=0, max=5000, offset=1000;
- printk("Synchronizing timebase\n");
+ pr_debug("Software timebase sync\n");
/* if this fails then this kernel won't work anyway... */
tbsync = kzalloc( sizeof(*tbsync), GFP_KERNEL );
@@ -123,13 +123,13 @@ void __devinit smp_generic_give_timebase(void)
while (!tbsync->ack)
barrier();
- printk("Got ack\n");
+ pr_debug("Got ack\n");
/* binary search */
for (old = -1; old != offset ; offset = (min+max) / 2) {
score = start_contest(kSetAndTest, offset, NUM_ITER);
- printk("score %d, offset %d\n", score, offset );
+ pr_debug("score %d, offset %d\n", score, offset );
if( score > 0 )
max = offset;
@@ -140,8 +140,8 @@ void __devinit smp_generic_give_timebase(void)
score = start_contest(kSetAndTest, min, NUM_ITER);
score2 = start_contest(kSetAndTest, max, NUM_ITER);
- printk("Min %d (score %d), Max %d (score %d)\n",
- min, score, max, score2);
+ pr_debug("Min %d (score %d), Max %d (score %d)\n",
+ min, score, max, score2);
score = abs(score);
score2 = abs(score2);
offset = (score < score2) ? min : max;
@@ -155,7 +155,7 @@ void __devinit smp_generic_give_timebase(void)
if (score2 <= score || score2 < 20)
break;
}
- printk("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER );
+ pr_debug("Final offset: %d (%d/%d)\n", offset, score2, NUM_ITER );
/* exiting */
tbsync->cmd = kExit;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ff9f7010097d..8ac3f721d235 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,7 +57,6 @@
#define DBG(fmt...)
#endif
-int smp_hw_index[NR_CPUS];
struct thread_info *secondary_ti;
cpumask_t cpu_possible_map = CPU_MASK_NONE;
@@ -123,6 +122,65 @@ void smp_message_recv(int msg)
}
}
+static irqreturn_t call_function_action(int irq, void *data)
+{
+ generic_smp_call_function_interrupt();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t reschedule_action(int irq, void *data)
+{
+ /* we just need the return path side effect of checking need_resched */
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t call_function_single_action(int irq, void *data)
+{
+ generic_smp_call_function_single_interrupt();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t debug_ipi_action(int irq, void *data)
+{
+ smp_message_recv(PPC_MSG_DEBUGGER_BREAK);
+ return IRQ_HANDLED;
+}
+
+static irq_handler_t smp_ipi_action[] = {
+ [PPC_MSG_CALL_FUNCTION] = call_function_action,
+ [PPC_MSG_RESCHEDULE] = reschedule_action,
+ [PPC_MSG_CALL_FUNC_SINGLE] = call_function_single_action,
+ [PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
+};
+
+const char *smp_ipi_name[] = {
+ [PPC_MSG_CALL_FUNCTION] = "ipi call function",
+ [PPC_MSG_RESCHEDULE] = "ipi reschedule",
+ [PPC_MSG_CALL_FUNC_SINGLE] = "ipi call function single",
+ [PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
+};
+
+/* optional function to request ipi, for controllers with >= 4 ipis */
+int smp_request_message_ipi(int virq, int msg)
+{
+ int err;
+
+ if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
+ return -EINVAL;
+ }
+#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
+ if (msg == PPC_MSG_DEBUGGER_BREAK) {
+ return 1;
+ }
+#endif
+ err = request_irq(virq, smp_ipi_action[msg], IRQF_DISABLED|IRQF_PERCPU,
+ smp_ipi_name[msg], 0);
+ WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
+ virq, smp_ipi_name[msg], err);
+
+ return err;
+}
+
void smp_send_reschedule(int cpu)
{
if (likely(smp_ops))
@@ -408,8 +466,7 @@ out:
static struct device_node *cpu_to_l2cache(int cpu)
{
struct device_node *np;
- const phandle *php;
- phandle ph;
+ struct device_node *cache;
if (!cpu_present(cpu))
return NULL;
@@ -418,13 +475,11 @@ static struct device_node *cpu_to_l2cache(int cpu)
if (np == NULL)
return NULL;
- php = of_get_property(np, "l2-cache", NULL);
- if (php == NULL)
- return NULL;
- ph = *php;
+ cache = of_find_next_cache_node(np);
+
of_node_put(np);
- return of_find_node_by_phandle(ph);
+ return cache;
}
/* Activate a secondary processor. */
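As a sketch of how a platform interrupt controller with four or more IPIs might use the new smp_request_message_ipi() (hypothetical: ipi_virq[] and example_request_ipis() are assumed names, not from this patch; the loop bounds mirror the smp_ipi_action[] table above):

static unsigned int ipi_virq[4];	/* assumed: one virq per message */

static void __init example_request_ipis(void)
{
	int msg;

	/* PPC_MSG_CALL_FUNCTION .. PPC_MSG_DEBUGGER_BREAK cover the four
	 * message slots handled by smp_ipi_action[] above. */
	for (msg = PPC_MSG_CALL_FUNCTION; msg <= PPC_MSG_DEBUGGER_BREAK; msg++)
		smp_request_message_ipi(ipi_virq[msg], msg);
}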
diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c
index 77b7b34b5955..560c96119501 100644
--- a/arch/powerpc/kernel/swsusp.c
+++ b/arch/powerpc/kernel/swsusp.c
@@ -34,6 +34,6 @@ void save_processor_state(void)
void restore_processor_state(void)
{
#ifdef CONFIG_PPC32
- set_context(current->active_mm->context.id, current->active_mm->pgd);
+ switch_mmu_context(NULL, current->active_mm);
#endif
}
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index 77fc76607ab2..b47d8ceffb52 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -5,7 +5,7 @@
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
-
+#include <asm/mmu.h>
/*
* Structure for storing CPU registers on the save area.
@@ -279,7 +279,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
mtibatl 3,r4
#endif
-BEGIN_FTR_SECTION
+BEGIN_MMU_FTR_SECTION
li r4,0
mtspr SPRN_DBAT4U,r4
mtspr SPRN_DBAT4L,r4
@@ -297,7 +297,7 @@ BEGIN_FTR_SECTION
mtspr SPRN_IBAT6L,r4
mtspr SPRN_IBAT7U,r4
mtspr SPRN_IBAT7L,r4
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
/* Flush all TLBs */
lis r4,0x1000
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 20885a38237a..0c64f10087b9 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -566,7 +566,6 @@ static bool cache_is_unified(struct device_node *np)
static struct cache_desc * __cpuinit create_cache_index_info(struct device_node *np, struct kobject *parent, int index, int level)
{
- const phandle *next_cache_phandle;
struct device_node *next_cache;
struct cache_desc *new, **end;
@@ -591,11 +590,7 @@ static struct cache_desc * __cpuinit create_cache_index_info(struct device_node
while (*end)
end = &(*end)->next;
- next_cache_phandle = of_get_property(np, "l2-cache", NULL);
- if (!next_cache_phandle)
- goto out;
-
- next_cache = of_find_node_by_phandle(*next_cache_phandle);
+ next_cache = of_find_next_cache_node(np);
if (!next_cache)
goto out;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index e2ee66b5831d..e1f3a5140429 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -164,8 +164,6 @@ static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static unsigned long boot_tb __read_mostly;
-static struct gettimeofday_struct do_gtod;
-
extern struct timezone sys_tz;
static long timezone_offset;
@@ -415,31 +413,9 @@ void udelay(unsigned long usecs)
}
EXPORT_SYMBOL(udelay);
-
-/*
- * There are two copies of tb_to_xs and stamp_xsec so that no
- * lock is needed to access and use these values in
- * do_gettimeofday. We alternate the copies and as long as a
- * reasonable time elapses between changes, there will never
- * be inconsistent values. ntpd has a minimum of one minute
- * between updates.
- */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
u64 new_tb_to_xs)
{
- unsigned temp_idx;
- struct gettimeofday_vars *temp_varp;
-
- temp_idx = (do_gtod.var_idx == 0);
- temp_varp = &do_gtod.vars[temp_idx];
-
- temp_varp->tb_to_xs = new_tb_to_xs;
- temp_varp->tb_orig_stamp = new_tb_stamp;
- temp_varp->stamp_xsec = new_stamp_xsec;
- smp_mb();
- do_gtod.varp = temp_varp;
- do_gtod.var_idx = temp_idx;
-
/*
* tb_update_count is used to allow the userspace gettimeofday code
* to assure itself that it sees a consistent view of the tb_to_xs and
@@ -456,6 +432,7 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
vdso_data->tb_to_xs = new_tb_to_xs;
vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
+ vdso_data->stamp_xtime = xtime;
smp_wmb();
++(vdso_data->tb_update_count);
}
@@ -514,9 +491,7 @@ static int __init iSeries_tb_recal(void)
tb_ticks_per_sec = new_tb_ticks_per_sec;
calc_cputime_factors();
div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
- do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
tb_to_xs = divres.result_low;
- do_gtod.varp->tb_to_xs = tb_to_xs;
vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
vdso_data->tb_to_xs = tb_to_xs;
}
@@ -988,15 +963,6 @@ void __init time_init(void)
sys_tz.tz_dsttime = 0;
}
- do_gtod.varp = &do_gtod.vars[0];
- do_gtod.var_idx = 0;
- do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
- __get_cpu_var(last_jiffy) = tb_last_jiffy;
- do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
- do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
- do_gtod.varp->tb_to_xs = tb_to_xs;
- do_gtod.tb_to_us = tb_to_us;
-
vdso_data->tb_orig_stamp = tb_last_jiffy;
vdso_data->tb_update_count = 0;
vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index f5def6cf5cd6..5457e9575685 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1160,37 +1160,85 @@ void CacheLockingException(struct pt_regs *regs, unsigned long address,
#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
+ extern int do_spe_mathemu(struct pt_regs *regs);
unsigned long spefscr;
int fpexc_mode;
int code = 0;
+ int err;
+
+ preempt_disable();
+ if (regs->msr & MSR_SPE)
+ giveup_spe(current);
+ preempt_enable();
spefscr = current->thread.spefscr;
fpexc_mode = current->thread.fpexc_mode;
- /* Hardware does not neccessarily set sticky
- * underflow/overflow/invalid flags */
if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
code = FPE_FLTOVF;
- spefscr |= SPEFSCR_FOVFS;
}
else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
code = FPE_FLTUND;
- spefscr |= SPEFSCR_FUNFS;
}
else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
code = FPE_FLTDIV;
else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
code = FPE_FLTINV;
- spefscr |= SPEFSCR_FINVS;
}
else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
code = FPE_FLTRES;
- current->thread.spefscr = spefscr;
+ err = do_spe_mathemu(regs);
+ if (err == 0) {
+ regs->nip += 4; /* skip emulated instruction */
+ emulate_single_step(regs);
+ return;
+ }
+
+ if (err == -EFAULT) {
+ /* got an error reading the instruction */
+ _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
+ } else if (err == -EINVAL) {
+ /* didn't recognize the instruction */
+ printk(KERN_ERR "unrecognized spe instruction "
+ "in %s at %lx\n", current->comm, regs->nip);
+ } else {
+ _exception(SIGFPE, regs, code, regs->nip);
+ }
- _exception(SIGFPE, regs, code, regs->nip);
return;
}
+
+void SPEFloatingPointRoundException(struct pt_regs *regs)
+{
+ extern int speround_handler(struct pt_regs *regs);
+ int err;
+
+ preempt_disable();
+ if (regs->msr & MSR_SPE)
+ giveup_spe(current);
+ preempt_enable();
+
+ regs->nip -= 4;
+ err = speround_handler(regs);
+ if (err == 0) {
+ regs->nip += 4; /* skip emulated instruction */
+ emulate_single_step(regs);
+ return;
+ }
+
+ if (err == -EFAULT) {
+ /* got an error reading the instruction */
+ _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
+ } else if (err == -EINVAL) {
+ /* didn't recognize the instruction */
+ printk(KERN_ERR "unrecognized spe instruction "
+ "in %s at %lx\n", current->comm, regs->nip);
+ } else {
+ _exception(SIGFPE, regs, 0, regs->nip);
+ return;
+ }
+}
#endif
/*
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index f7ec7d0888fe..ad06d5c75b15 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -566,6 +566,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
do_feature_fixups(cur_cpu_spec->cpu_features,
start64, start64 + size64);
+ start64 = find_section64(v64->hdr, "__mmu_ftr_fixup", &size64);
+ if (start64)
+ do_feature_fixups(cur_cpu_spec->mmu_features,
+ start64, start64 + size64);
+
start64 = find_section64(v64->hdr, "__fw_ftr_fixup", &size64);
if (start64)
do_feature_fixups(powerpc_firmware_features,
@@ -582,6 +587,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
do_feature_fixups(cur_cpu_spec->cpu_features,
start32, start32 + size32);
+ start32 = find_section32(v32->hdr, "__mmu_ftr_fixup", &size32);
+ if (start32)
+ do_feature_fixups(cur_cpu_spec->mmu_features,
+ start32, start32 + size32);
+
#ifdef CONFIG_PPC64
start32 = find_section32(v32->hdr, "__fw_ftr_fixup", &size32);
if (start32)
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 72ca26df457e..ee038d4bf252 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -16,6 +16,13 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
+/* Offset for the low 32-bit part of a field of long type */
+#ifdef CONFIG_PPC64
+#define LOPART 4
+#else
+#define LOPART 0
+#endif
+
.text
/*
* Exact prototype of gettimeofday
@@ -90,101 +97,53 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
mflr r12 /* r12 saves lr */
.cfi_register lr,r12
- mr r10,r3 /* r10 saves id */
mr r11,r4 /* r11 saves tp */
bl __get_datapage@local /* get data page */
mr r9,r3 /* datapage ptr in r9 */
- beq cr1,50f /* if monotonic -> jump there */
-
- /*
- * CLOCK_REALTIME
- */
-
- bl __do_get_xsec@local /* get xsec from tb & kernel */
- bne- 98f /* out of line -> do syscall */
-
- /* seconds are xsec >> 20 */
- rlwinm r5,r4,12,20,31
- rlwimi r5,r3,12,0,19
- stw r5,TSPC32_TV_SEC(r11)
- /* get remaining xsec and convert to nsec. we scale
- * up remaining xsec by 12 bits and get the top 32 bits
- * of the multiplication, then we multiply by 1000
- */
- rlwinm r5,r4,12,0,19
- lis r6,1000000@h
- ori r6,r6,1000000@l
- mulhwu r5,r5,r6
- mulli r5,r5,1000
- stw r5,TSPC32_TV_NSEC(r11)
- mtlr r12
- crclr cr0*4+so
- li r3,0
- blr
+50: bl __do_get_tspec@local /* get sec/nsec from tb & kernel */
+ bne cr1,80f /* not monotonic -> all done */
/*
* CLOCK_MONOTONIC
*/
-50: bl __do_get_xsec@local /* get xsec from tb & kernel */
- bne- 98f /* out of line -> do syscall */
-
- /* seconds are xsec >> 20 */
- rlwinm r6,r4,12,20,31
- rlwimi r6,r3,12,0,19
-
- /* get remaining xsec and convert to nsec. we scale
- * up remaining xsec by 12 bits and get the top 32 bits
- * of the multiplication, then we multiply by 1000
- */
- rlwinm r7,r4,12,0,19
- lis r5,1000000@h
- ori r5,r5,1000000@l
- mulhwu r7,r7,r5
- mulli r7,r7,1000
-
/* now we must fixup using wall to monotonic. We need to snapshot
* that value and do the counter trick again. Fortunately, we still
* have the counter value in r8 that was returned by __do_get_xsec.
- * At this point, r6,r7 contain our sec/nsec values, r3,r4 and r5
- * can be used
+ * At this point, r3,r4 contain our sec/nsec values, r5 and r6
+ * can be used, r7 contains NSEC_PER_SEC.
*/
- lwz r3,WTOM_CLOCK_SEC(r9)
- lwz r4,WTOM_CLOCK_NSEC(r9)
+ lwz r5,WTOM_CLOCK_SEC(r9)
+ lwz r6,WTOM_CLOCK_NSEC(r9)
- /* We now have our result in r3,r4. We create a fake dependency
- * on that result and re-check the counter
+ /* We now have our offset in r5,r6. We create a fake dependency
+ * on that value and re-check the counter
*/
- or r5,r4,r3
- xor r0,r5,r5
+ or r0,r6,r5
+ xor r0,r0,r0
add r9,r9,r0
-#ifdef CONFIG_PPC64
- lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9)
-#else
- lwz r0,(CFG_TB_UPDATE_COUNT)(r9)
-#endif
+ lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
cmpl cr0,r8,r0 /* check if updated */
bne- 50b
- /* Calculate and store result. Note that this mimmics the C code,
+ /* Calculate and store result. Note that this mimics the C code,
* which may cause funny results if nsec goes negative... is that
* possible at all ?
*/
- add r3,r3,r6
- add r4,r4,r7
- lis r5,NSEC_PER_SEC@h
- ori r5,r5,NSEC_PER_SEC@l
- cmpl cr0,r4,r5
- cmpli cr1,r4,0
+ add r3,r3,r5
+ add r4,r4,r6
+ cmpw cr0,r4,r7
+ cmpwi cr1,r4,0
blt 1f
- subf r4,r5,r4
+ subf r4,r7,r4
addi r3,r3,1
-1: bge cr1,1f
+1: bge cr1,80f
addi r3,r3,-1
- add r4,r4,r5
-1: stw r3,TSPC32_TV_SEC(r11)
+ add r4,r4,r7
+
+80: stw r3,TSPC32_TV_SEC(r11)
stw r4,TSPC32_TV_NSEC(r11)
mtlr r12
@@ -195,10 +154,6 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
/*
* syscall fallback
*/
-98:
- mtlr r12
- mr r3,r10
- mr r4,r11
99:
li r0,__NR_clock_gettime
sc
@@ -254,11 +209,7 @@ __do_get_xsec:
/* Check for update count & load values. We use the low
* order 32 bits of the update count
*/
-#ifdef CONFIG_PPC64
-1: lwz r8,(CFG_TB_UPDATE_COUNT+4)(r9)
-#else
-1: lwz r8,(CFG_TB_UPDATE_COUNT)(r9)
-#endif
+1: lwz r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
andi. r0,r8,1 /* pending update ? loop */
bne- 1b
xor r0,r8,r8 /* create dependency */
@@ -305,11 +256,7 @@ __do_get_xsec:
or r6,r4,r3
xor r0,r6,r6
add r9,r9,r0
-#ifdef CONFIG_PPC64
- lwz r0,(CFG_TB_UPDATE_COUNT+4)(r9)
-#else
- lwz r0,(CFG_TB_UPDATE_COUNT)(r9)
-#endif
+ lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
cmpl cr0,r8,r0 /* check if updated */
bne- 1b
@@ -322,3 +269,98 @@ __do_get_xsec:
*/
3: blr
.cfi_endproc
+
+/*
+ * This is the core of clock_gettime(), it returns the current
+ * time in seconds and nanoseconds in r3 and r4.
+ * It expects the datapage ptr in r9 and doesn't clobber it.
+ * It clobbers r0, r5, r6, r10 and returns NSEC_PER_SEC in r7.
+ * On return, r8 contains the counter value that can be reused.
+ * This clobbers cr0 but not any other cr field.
+ */
+__do_get_tspec:
+ .cfi_startproc
+ /* Check for update count & load values. We use the low
+ * order 32 bits of the update count
+ */
+1: lwz r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
+ andi. r0,r8,1 /* pending update ? loop */
+ bne- 1b
+ xor r0,r8,r8 /* create dependency */
+ add r9,r9,r0
+
+ /* Load orig stamp (offset to TB) */
+ lwz r5,CFG_TB_ORIG_STAMP(r9)
+ lwz r6,(CFG_TB_ORIG_STAMP+4)(r9)
+
+ /* Get a stable TB value */
+2: mftbu r3
+ mftbl r4
+ mftbu r0
+ cmpl cr0,r3,r0
+ bne- 2b
+
+ /* Subtract tb orig stamp and shift left 12 bits.
+ */
+ subfc r7,r6,r4
+ subfe r0,r5,r3
+ slwi r0,r0,12
+ rlwimi. r0,r7,12,20,31
+ slwi r7,r7,12
+
+ /* Load scale factor & do multiplication */
+ lwz r5,CFG_TB_TO_XS(r9) /* load values */
+ lwz r6,(CFG_TB_TO_XS+4)(r9)
+ mulhwu r3,r7,r6
+ mullw r10,r7,r5
+ mulhwu r4,r7,r5
+ addc r10,r3,r10
+ li r3,0
+
+ beq+ 4f /* skip high part computation if 0 */
+ mulhwu r3,r0,r5
+ mullw r7,r0,r5
+ mulhwu r5,r0,r6
+ mullw r6,r0,r6
+ adde r4,r4,r7
+ addze r3,r3
+ addc r4,r4,r5
+ addze r3,r3
+ addc r10,r10,r6
+
+4: addze r4,r4 /* add in carry */
+ lis r7,NSEC_PER_SEC@h
+ ori r7,r7,NSEC_PER_SEC@l
+ mulhwu r4,r4,r7 /* convert to nanoseconds */
+
+ /* At this point, we have seconds & nanoseconds since the xtime
+ * stamp in r3+CA and r4. Load & add the xtime stamp.
+ */
+#ifdef CONFIG_PPC64
+ lwz r5,STAMP_XTIME+TSPC64_TV_SEC+LOPART(r9)
+ lwz r6,STAMP_XTIME+TSPC64_TV_NSEC+LOPART(r9)
+#else
+ lwz r5,STAMP_XTIME+TSPC32_TV_SEC(r9)
+ lwz r6,STAMP_XTIME+TSPC32_TV_NSEC(r9)
+#endif
+ add r4,r4,r6
+ adde r3,r3,r5
+
+ /* We now have our result in r3,r4. We create a fake dependency
+ * on that result and re-check the counter
+ */
+ or r6,r4,r3
+ xor r0,r6,r6
+ add r9,r9,r0
+ lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
+ cmpl cr0,r8,r0 /* check if updated */
+ bne- 1b
+
+ /* check for nanosecond overflow and adjust if necessary */
+ cmpw r4,r7
+ bltlr /* all done if no overflow */
+ subf r4,r7,r4 /* adjust if overflow */
+ addi r3,r3,1
+
+ blr
+ .cfi_endproc
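For readers following the fixed-point arithmetic, a rough C model of what __do_get_tspec computes (illustrative only: the 128-bit multiply stands in for the mulhwu/mulhdu sequences, u32/u64 are the usual kernel types, and tb_orig_stamp/tb_to_xs are the vdso_data fields referenced as CFG_TB_ORIG_STAMP and CFG_TB_TO_XS above):

static inline void tspec_model(u64 tb, u64 tb_orig_stamp, u64 tb_to_xs,
			       u32 *sec, u32 *nsec)
{
	/* tb_to_xs is a 0.64 fixed-point ticks -> 2^-20 s scale; the
	 * extra << 12 makes the product's high 64 bits 2^-32 s units. */
	u64 delta = (tb - tb_orig_stamp) << 12;
	u64 frac  = (u64)(((unsigned __int128)delta * tb_to_xs) >> 64);

	*sec  = frac >> 32;				/* seconds since stamp_xtime */
	*nsec = ((u64)(u32)frac * NSEC_PER_SEC) >> 32;	/* fractional part in ns */
	/* the caller then adds stamp_xtime (plus the wall->monotonic
	 * offset for CLOCK_MONOTONIC) and normalizes nsec. */
}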
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index be3b6a41dc09..904ef1360dd7 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -34,6 +34,9 @@ SECTIONS
__ftr_fixup : { *(__ftr_fixup) }
. = ALIGN(8);
+ __mmu_ftr_fixup : { *(__mmu_ftr_fixup) }
+
+ . = ALIGN(8);
__lwsync_fixup : { *(__lwsync_fixup) }
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index c6401f9e37f1..262cd5857a56 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -75,90 +75,49 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
mflr r12 /* r12 saves lr */
.cfi_register lr,r12
- mr r10,r3 /* r10 saves id */
mr r11,r4 /* r11 saves tp */
bl V_LOCAL_FUNC(__get_datapage) /* get data page */
- beq cr1,50f /* if monotonic -> jump there */
-
- /*
- * CLOCK_REALTIME
- */
-
- bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */
-
- lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */
- ori r7,r7,16960
- rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */
- rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */
- std r5,TSPC64_TV_SEC(r11) /* store sec in tv */
- subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */
- mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) /
- * XSEC_PER_SEC
- */
- rldicl r0,r0,44,20
- mulli r0,r0,1000 /* nsec = usec * 1000 */
- std r0,TSPC64_TV_NSEC(r11) /* store nsec in tp */
-
- mtlr r12
- crclr cr0*4+so
- li r3,0
- blr
+50: bl V_LOCAL_FUNC(__do_get_tspec) /* get time from tb & kernel */
+ bne cr1,80f /* if not monotonic, all done */
/*
* CLOCK_MONOTONIC
*/
-50: bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */
-
- lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */
- ori r7,r7,16960
- rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */
- rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */
- subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */
- mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) /
- * XSEC_PER_SEC
- */
- rldicl r6,r0,44,20
- mulli r6,r6,1000 /* nsec = usec * 1000 */
-
/* now we must fixup using wall to monotonic. We need to snapshot
* that value and do the counter trick again. Fortunately, we still
- * have the counter value in r8 that was returned by __do_get_xsec.
- * At this point, r5,r6 contain our sec/nsec values.
- * can be used
+ * have the counter value in r8 that was returned by __do_get_tspec.
+ * At this point, r4,r5 contain our sec/nsec values.
*/
- lwa r4,WTOM_CLOCK_SEC(r3)
- lwa r7,WTOM_CLOCK_NSEC(r3)
+ lwa r6,WTOM_CLOCK_SEC(r3)
+ lwa r9,WTOM_CLOCK_NSEC(r3)
- /* We now have our result in r4,r7. We create a fake dependency
+ /* We now have our result in r6,r9. We create a fake dependency
* on that result and re-check the counter
*/
- or r9,r4,r7
- xor r0,r9,r9
+ or r0,r6,r9
+ xor r0,r0,r0
add r3,r3,r0
ld r0,CFG_TB_UPDATE_COUNT(r3)
cmpld cr0,r0,r8 /* check if updated */
bne- 50b
- /* Calculate and store result. Note that this mimmics the C code,
- * which may cause funny results if nsec goes negative... is that
- * possible at all ?
+ /* Add wall->monotonic offset and check for overflow or underflow.
*/
- add r4,r4,r5
- add r7,r7,r6
- lis r9,NSEC_PER_SEC@h
- ori r9,r9,NSEC_PER_SEC@l
- cmpl cr0,r7,r9
- cmpli cr1,r7,0
+ add r4,r4,r6
+ add r5,r5,r9
+ cmpd cr0,r5,r7
+ cmpdi cr1,r5,0
blt 1f
- subf r7,r9,r7
+ subf r5,r7,r5
addi r4,r4,1
-1: bge cr1,1f
+1: bge cr1,80f
addi r4,r4,-1
- add r7,r7,r9
-1: std r4,TSPC64_TV_SEC(r11)
- std r7,TSPC64_TV_NSEC(r11)
+ add r5,r5,r7
+
+80: std r4,TSPC64_TV_SEC(r11)
+ std r5,TSPC64_TV_NSEC(r11)
mtlr r12
crclr cr0*4+so
@@ -168,10 +127,6 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
/*
* syscall fallback
*/
-98:
- mtlr r12
- mr r3,r10
- mr r4,r11
99:
li r0,__NR_clock_gettime
sc
@@ -253,3 +208,59 @@ V_FUNCTION_BEGIN(__do_get_xsec)
blr
.cfi_endproc
V_FUNCTION_END(__do_get_xsec)
+
+/*
+ * This is the core of clock_gettime(), it returns the current
+ * time in seconds and nanoseconds in r4 and r5.
+ * It expects the datapage ptr in r3 and doesn't clobber it.
+ * It clobbers r0 and r6 and returns NSEC_PER_SEC in r7.
+ * On return, r8 contains the counter value that can be reused.
+ * This clobbers cr0 but not any other cr field.
+ */
+V_FUNCTION_BEGIN(__do_get_tspec)
+ .cfi_startproc
+ /* check for update count & load values */
+1: ld r8,CFG_TB_UPDATE_COUNT(r3)
+ andi. r0,r8,1 /* pending update ? loop */
+ bne- 1b
+ xor r0,r8,r8 /* create dependency */
+ add r3,r3,r0
+
+ /* Get TB & offset it. We use the MFTB macro which will generate
+ * workaround code for Cell.
+ */
+ MFTB(r7)
+ ld r9,CFG_TB_ORIG_STAMP(r3)
+ subf r7,r9,r7
+
+ /* Scale result */
+ ld r5,CFG_TB_TO_XS(r3)
+ sldi r7,r7,12 /* compute time since stamp_xtime */
+ mulhdu r6,r7,r5 /* in units of 2^-32 seconds */
+
+ /* Add stamp since epoch */
+ ld r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
+ ld r5,STAMP_XTIME+TSPC64_TV_NSEC(r3)
+ or r0,r4,r5
+ or r0,r0,r6
+ xor r0,r0,r0
+ add r3,r3,r0
+ ld r0,CFG_TB_UPDATE_COUNT(r3)
+ cmpld r0,r8 /* check if updated */
+ bne- 1b /* reload if so */
+
+ /* convert to seconds & nanoseconds and add to stamp */
+ lis r7,NSEC_PER_SEC@h
+ ori r7,r7,NSEC_PER_SEC@l
+ mulhwu r0,r6,r7 /* compute nanoseconds and */
+ srdi r6,r6,32 /* seconds since stamp_xtime */
+ clrldi r0,r0,32
+ add r5,r5,r0 /* add nanoseconds together */
+ cmpd r5,r7 /* overflow? */
+ add r4,r4,r6
+ bltlr /* all done if no overflow */
+ subf r5,r7,r5 /* if overflow, adjust */
+ addi r4,r4,1
+ blr
+ .cfi_endproc
+V_FUNCTION_END(__do_get_tspec)
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index d0b2526dd38d..0e615404e247 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -35,6 +35,9 @@ SECTIONS
__ftr_fixup : { *(__ftr_fixup) }
. = ALIGN(8);
+ __mmu_ftr_fixup : { *(__mmu_ftr_fixup) }
+
+ . = ALIGN(8);
__lwsync_fixup : { *(__lwsync_fixup) }
. = ALIGN(8);
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index a11e6bc59b30..94aa7b011b27 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -41,9 +41,9 @@
static struct bus_type vio_bus_type;
static struct vio_dev vio_bus_device = { /* fake "parent" device */
- .name = vio_bus_device.dev.bus_id,
+ .name = "vio",
.type = "",
- .dev.bus_id = "vio",
+ .dev.init_name = "vio",
.dev.bus = &vio_bus_type,
};
@@ -1216,7 +1216,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
viodev->irq = irq_of_parse_and_map(of_node, 0);
- snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%x", *unit_address);
+ dev_set_name(&viodev->dev, "%x", *unit_address);
viodev->name = of_node->name;
viodev->type = of_node->type;
viodev->unit_address = *unit_address;
@@ -1243,7 +1243,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
/* register with generic device framework */
if (device_register(&viodev->dev)) {
printk(KERN_ERR "%s: failed to register device %s\n",
- __func__, viodev->dev.bus_id);
+ __func__, dev_name(&viodev->dev));
/* XXX free TCE table */
kfree(viodev);
return NULL;
@@ -1400,13 +1400,13 @@ static struct vio_dev *vio_find_name(const char *name)
struct vio_dev *vio_find_node(struct device_node *vnode)
{
const uint32_t *unit_address;
- char kobj_name[BUS_ID_SIZE];
+ char kobj_name[20];
/* construct the kobject name from the device node */
unit_address = of_get_property(vnode, "reg", NULL);
if (!unit_address)
return NULL;
- snprintf(kobj_name, BUS_ID_SIZE, "%x", *unit_address);
+ snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address);
return vio_find_name(kobj_name);
}
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 2412c056baa4..47bf15cd2c9e 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -152,6 +152,12 @@ SECTIONS
__stop___ftr_fixup = .;
}
. = ALIGN(8);
+ __mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
+ __start___mmu_ftr_fixup = .;
+ *(__mmu_ftr_fixup)
+ __stop___mmu_ftr_fixup = .;
+ }
+ . = ALIGN(8);
__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
__start___lwsync_fixup = .;
*(__lwsync_fixup)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index fda9baada132..8bef0efcdfe1 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -28,6 +28,7 @@
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
+#include "../mm/mmu_decl.h"
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
@@ -330,7 +331,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
/* XXX It would be nice to differentiate between heavyweight exit and
* sched_out here, since we could avoid the TLB flush for heavyweight
* exits. */
- _tlbia();
+ _tlbil_all();
}
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 25ec5378afa4..70693a5c12a1 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -26,11 +26,24 @@ _GLOBAL(__copy_tofrom_user)
andi. r6,r6,7
PPC_MTOCRF 0x01,r5
blt cr1,.Lshort_copy
+/* Below we want to nop out the bne if we're on a CPU that has the
+ * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
+ * cleared.
+ * At the time of writing the only CPU that has this combination of bits
+ * set is Power6.
+ */
+BEGIN_FTR_SECTION
+ nop
+FTR_SECTION_ELSE
bne .Ldst_unaligned
+ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
+ CPU_FTR_UNALIGNED_LD_STD)
.Ldst_aligned:
- andi. r0,r4,7
addi r3,r3,-16
+BEGIN_FTR_SECTION
+ andi. r0,r4,7
bne .Lsrc_unaligned
+END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
srdi r7,r5,4
20: ld r9,0(r4)
addi r4,r4,-8
@@ -138,7 +151,7 @@ _GLOBAL(__copy_tofrom_user)
PPC_MTOCRF 0x01,r6 /* put #bytes to 8B bdry into cr7 */
subf r5,r6,r5
li r7,0
- cmpldi r1,r5,16
+ cmpldi cr1,r5,16
bf cr7*4+3,1f
35: lbz r0,0(r4)
81: stb r0,0(r3)
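The nop/bne choice above is resolved at boot by the feature-fixup pass; a small illustrative model of the selection rule (not the kernel's actual fixup routine) is:

/* Model of ALT_FTR_SECTION_END(mask, value): the code between
 * BEGIN_FTR_SECTION and FTR_SECTION_ELSE survives only when the CPU
 * feature bits selected by 'mask' equal 'value'; otherwise the else
 * section is patched in.  For the block above, mask is
 * CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ and value is
 * CPU_FTR_UNALIGNED_LD_STD, so the 'nop' is kept only on CPUs with
 * unaligned ld/std support and no dcbtz-based copy (Power6 at the
 * time of writing). */
static int keep_primary_section(unsigned long cpu_features,
				unsigned long mask, unsigned long value)
{
	return (cpu_features & mask) == value;
}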
diff --git a/arch/powerpc/lib/dma-noncoherent.c b/arch/powerpc/lib/dma-noncoherent.c
index 31734c0969cd..b7dc4c19f582 100644
--- a/arch/powerpc/lib/dma-noncoherent.c
+++ b/arch/powerpc/lib/dma-noncoherent.c
@@ -77,26 +77,26 @@ static DEFINE_SPINLOCK(consistent_lock);
* the amount of RAM found at boot time.) I would imagine that get_vm_area()
* would have to initialise this each time prior to calling vm_region_alloc().
*/
-struct vm_region {
+struct ppc_vm_region {
struct list_head vm_list;
unsigned long vm_start;
unsigned long vm_end;
};
-static struct vm_region consistent_head = {
+static struct ppc_vm_region consistent_head = {
.vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
.vm_start = CONSISTENT_BASE,
.vm_end = CONSISTENT_END,
};
-static struct vm_region *
-vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
+static struct ppc_vm_region *
+ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
{
unsigned long addr = head->vm_start, end = head->vm_end - size;
unsigned long flags;
- struct vm_region *c, *new;
+ struct ppc_vm_region *c, *new;
- new = kmalloc(sizeof(struct vm_region), gfp);
+ new = kmalloc(sizeof(struct ppc_vm_region), gfp);
if (!new)
goto out;
@@ -130,9 +130,9 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
return NULL;
}
-static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
+static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
{
- struct vm_region *c;
+ struct ppc_vm_region *c;
list_for_each_entry(c, &head->vm_list, vm_list) {
if (c->vm_start == addr)
@@ -151,7 +151,7 @@ void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
{
struct page *page;
- struct vm_region *c;
+ struct ppc_vm_region *c;
unsigned long order;
u64 mask = 0x00ffffff, limit; /* ISA default */
@@ -191,7 +191,7 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
/*
* Allocate a virtual address in the consistent mapping region.
*/
- c = vm_region_alloc(&consistent_head, size,
+ c = ppc_vm_region_alloc(&consistent_head, size,
gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
if (c) {
unsigned long vaddr = c->vm_start;
@@ -239,7 +239,7 @@ EXPORT_SYMBOL(__dma_alloc_coherent);
*/
void __dma_free_coherent(size_t size, void *vaddr)
{
- struct vm_region *c;
+ struct ppc_vm_region *c;
unsigned long flags, addr;
pte_t *ptep;
@@ -247,7 +247,7 @@ void __dma_free_coherent(size_t size, void *vaddr)
spin_lock_irqsave(&consistent_lock, flags);
- c = vm_region_find(&consistent_head, (unsigned long)vaddr);
+ c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
if (!c)
goto no_area;
@@ -320,7 +320,6 @@ static int __init dma_alloc_init(void)
ret = -ENOMEM;
break;
}
- WARN_ON(!pmd_none(*pmd));
pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
if (!pte) {
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index 3f131129d1c1..fe2d34e5332d 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -18,11 +18,23 @@ _GLOBAL(memcpy)
andi. r6,r6,7
dcbt 0,r4
blt cr1,.Lshort_copy
+/* Below we want to nop out the bne if we're on a CPU that has the
+ CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
+ cleared.
+ At the time of writing the only CPU that has this combination of bits
+ set is Power6. */
+BEGIN_FTR_SECTION
+ nop
+FTR_SECTION_ELSE
bne .Ldst_unaligned
+ALT_FTR_SECTION_END(CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_CP_USE_DCBTZ, \
+ CPU_FTR_UNALIGNED_LD_STD)
.Ldst_aligned:
- andi. r0,r4,7
addi r3,r3,-16
+BEGIN_FTR_SECTION
+ andi. r0,r4,7
bne .Lsrc_unaligned
+END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
srdi r7,r5,4
ld r9,0(r4)
addi r4,r4,-8
@@ -131,7 +143,7 @@ _GLOBAL(memcpy)
PPC_MTOCRF 0x01,r6 # put #bytes to 8B bdry into cr7
subf r5,r6,r5
li r7,0
- cmpldi r1,r5,16
+ cmpldi cr1,r5,16
bf cr7*4+3,1f
lbz r0,0(r4)
stb r0,0(r3)
diff --git a/arch/powerpc/math-emu/Makefile b/arch/powerpc/math-emu/Makefile
index 03aa98dd9f0a..f9e506a735ae 100644
--- a/arch/powerpc/math-emu/Makefile
+++ b/arch/powerpc/math-emu/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_MATH_EMULATION) += fabs.o fadd.o fadds.o fcmpo.o fcmpu.o \
mcrfs.o mffs.o mtfsb0.o mtfsb1.o \
mtfsf.o mtfsfi.o stfiwx.o stfs.o
+obj-$(CONFIG_SPE) += math_efp.o
+
CFLAGS_fabs.o = -fno-builtin-fabs
CFLAGS_math.o = -fno-builtin-fabs
diff --git a/arch/powerpc/math-emu/fadd.c b/arch/powerpc/math-emu/fadd.c
index 04d3b4aa32ce..0158a16e2b82 100644
--- a/arch/powerpc/math-emu/fadd.c
+++ b/arch/powerpc/math-emu/fadd.c
@@ -13,7 +13,6 @@ fadd(void *frD, void *frA, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p\n", __func__, frD, frA, frB);
diff --git a/arch/powerpc/math-emu/fcmpo.c b/arch/powerpc/math-emu/fcmpo.c
index b5dc4498cd71..5bce011c2aec 100644
--- a/arch/powerpc/math-emu/fcmpo.c
+++ b/arch/powerpc/math-emu/fcmpo.c
@@ -14,7 +14,6 @@ fcmpo(u32 *ccr, int crfD, void *frA, void *frB)
FP_DECL_EX;
int code[4] = { (1 << 3), (1 << 1), (1 << 2), (1 << 0) };
long cmp;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p (%08x) %d %p %p\n", __func__, ccr, *ccr, crfD, frA, frB);
@@ -29,7 +28,7 @@ fcmpo(u32 *ccr, int crfD, void *frA, void *frB)
#endif
if (A_c == FP_CLS_NAN || B_c == FP_CLS_NAN)
- ret |= EFLAG_VXVC;
+ FP_SET_EXCEPTION(EFLAG_VXVC);
FP_CMP_D(cmp, A, B, 2);
cmp = code[(cmp + 1) & 3];
@@ -44,5 +43,5 @@ fcmpo(u32 *ccr, int crfD, void *frA, void *frB)
printk("CR: %08x\n", *ccr);
#endif
- return ret;
+ return FP_CUR_EXCEPTIONS;
}
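The math-emu changes above (and in the files that follow) replace each routine's private "ret" accumulator with the soft-fp exception accumulator. As a rough sketch of the generic convention being relied on (the exact definitions live in the soft-fp and sfp-machine headers, so treat this as illustrative rather than authoritative):

/* FP_DECL_EX declares a per-call exception accumulator; the other two
 * macros (approximately) update and read it, so every flag raised
 * anywhere in the emulation is reported back to the caller in one
 * place instead of through a local variable. */
#define FP_DECL_EX		int _fex = 0
#define FP_SET_EXCEPTION(ex)	(_fex |= (ex))
#define FP_CUR_EXCEPTIONS	(_fex)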
diff --git a/arch/powerpc/math-emu/fdiv.c b/arch/powerpc/math-emu/fdiv.c
index 2db15097d98e..a29239c05e3e 100644
--- a/arch/powerpc/math-emu/fdiv.c
+++ b/arch/powerpc/math-emu/fdiv.c
@@ -13,7 +13,6 @@ fdiv(void *frD, void *frA, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -28,22 +27,22 @@ fdiv(void *frD, void *frA, void *frB)
#endif
if (A_c == FP_CLS_ZERO && B_c == FP_CLS_ZERO) {
- ret |= EFLAG_VXZDZ;
+ FP_SET_EXCEPTION(EFLAG_VXZDZ);
#ifdef DEBUG
printk("%s: FPSCR_VXZDZ raised\n", __func__);
#endif
}
if (A_c == FP_CLS_INF && B_c == FP_CLS_INF) {
- ret |= EFLAG_VXIDI;
+ FP_SET_EXCEPTION(EFLAG_VXIDI);
#ifdef DEBUG
printk("%s: FPSCR_VXIDI raised\n", __func__);
#endif
}
if (B_c == FP_CLS_ZERO && A_c != FP_CLS_ZERO) {
- ret |= EFLAG_DIVZERO;
+ FP_SET_EXCEPTION(EFLAG_DIVZERO);
if (__FPU_TRAP_P(EFLAG_DIVZERO))
- return ret;
+ return FP_CUR_EXCEPTIONS;
}
FP_DIV_D(R, A, B);
diff --git a/arch/powerpc/math-emu/fdivs.c b/arch/powerpc/math-emu/fdivs.c
index 797f6a9a20b5..526bc261275f 100644
--- a/arch/powerpc/math-emu/fdivs.c
+++ b/arch/powerpc/math-emu/fdivs.c
@@ -14,7 +14,6 @@ fdivs(void *frD, void *frA, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -29,22 +28,22 @@ fdivs(void *frD, void *frA, void *frB)
#endif
if (A_c == FP_CLS_ZERO && B_c == FP_CLS_ZERO) {
- ret |= EFLAG_VXZDZ;
+ FP_SET_EXCEPTION(EFLAG_VXZDZ);
#ifdef DEBUG
printk("%s: FPSCR_VXZDZ raised\n", __func__);
#endif
}
if (A_c == FP_CLS_INF && B_c == FP_CLS_INF) {
- ret |= EFLAG_VXIDI;
+ FP_SET_EXCEPTION(EFLAG_VXIDI);
#ifdef DEBUG
printk("%s: FPSCR_VXIDI raised\n", __func__);
#endif
}
if (B_c == FP_CLS_ZERO && A_c != FP_CLS_ZERO) {
- ret |= EFLAG_DIVZERO;
+ FP_SET_EXCEPTION(EFLAG_DIVZERO);
if (__FPU_TRAP_P(EFLAG_DIVZERO))
- return ret;
+ return FP_CUR_EXCEPTIONS;
}
FP_DIV_D(R, A, B);
diff --git a/arch/powerpc/math-emu/fmadd.c b/arch/powerpc/math-emu/fmadd.c
index 925313aa6f82..8c3f20aa5a95 100644
--- a/arch/powerpc/math-emu/fmadd.c
+++ b/arch/powerpc/math-emu/fmadd.c
@@ -15,7 +15,6 @@ fmadd(void *frD, void *frA, void *frB, void *frC)
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -33,12 +32,12 @@ fmadd(void *frD, void *frA, void *frB, void *frC)
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
diff --git a/arch/powerpc/math-emu/fmadds.c b/arch/powerpc/math-emu/fmadds.c
index aea80ef79399..794fb31e59d1 100644
--- a/arch/powerpc/math-emu/fmadds.c
+++ b/arch/powerpc/math-emu/fmadds.c
@@ -16,7 +16,6 @@ fmadds(void *frD, void *frA, void *frB, void *frC)
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -34,12 +33,12 @@ fmadds(void *frD, void *frA, void *frB, void *frC)
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
diff --git a/arch/powerpc/math-emu/fmsub.c b/arch/powerpc/math-emu/fmsub.c
index a644d525fca6..626f6fed84ac 100644
--- a/arch/powerpc/math-emu/fmsub.c
+++ b/arch/powerpc/math-emu/fmsub.c
@@ -15,7 +15,6 @@ fmsub(void *frD, void *frA, void *frB, void *frC)
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -33,7 +32,7 @@ fmsub(void *frD, void *frA, void *frB, void *frC)
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
@@ -41,7 +40,7 @@ fmsub(void *frD, void *frA, void *frB, void *frC)
B_s ^= 1;
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
diff --git a/arch/powerpc/math-emu/fmsubs.c b/arch/powerpc/math-emu/fmsubs.c
index 2fdeeb9bb569..3425bc899760 100644
--- a/arch/powerpc/math-emu/fmsubs.c
+++ b/arch/powerpc/math-emu/fmsubs.c
@@ -16,7 +16,6 @@ fmsubs(void *frD, void *frA, void *frB, void *frC)
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -34,7 +33,7 @@ fmsubs(void *frD, void *frA, void *frB, void *frC)
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
@@ -42,7 +41,7 @@ fmsubs(void *frD, void *frA, void *frB, void *frC)
B_s ^= 1;
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
diff --git a/arch/powerpc/math-emu/fmul.c b/arch/powerpc/math-emu/fmul.c
index 391fd17d3440..2c1929779892 100644
--- a/arch/powerpc/math-emu/fmul.c
+++ b/arch/powerpc/math-emu/fmul.c
@@ -13,7 +13,6 @@ fmul(void *frD, void *frA, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -31,7 +30,7 @@ fmul(void *frD, void *frA, void *frB)
if ((A_c == FP_CLS_INF && B_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && B_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(R, A, B);
diff --git a/arch/powerpc/math-emu/fmuls.c b/arch/powerpc/math-emu/fmuls.c
index 2d3ec5f7da20..f5ad5c9c77d0 100644
--- a/arch/powerpc/math-emu/fmuls.c
+++ b/arch/powerpc/math-emu/fmuls.c
@@ -14,7 +14,6 @@ fmuls(void *frD, void *frA, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -32,7 +31,7 @@ fmuls(void *frD, void *frA, void *frB)
if ((A_c == FP_CLS_INF && B_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && B_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(R, A, B);
diff --git a/arch/powerpc/math-emu/fnmadd.c b/arch/powerpc/math-emu/fnmadd.c
index 2497b86494e5..e817bc5453ef 100644
--- a/arch/powerpc/math-emu/fnmadd.c
+++ b/arch/powerpc/math-emu/fnmadd.c
@@ -15,7 +15,6 @@ fnmadd(void *frD, void *frA, void *frB, void *frC)
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -33,12 +32,12 @@ fnmadd(void *frD, void *frA, void *frB, void *frC)
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
diff --git a/arch/powerpc/math-emu/fnmadds.c b/arch/powerpc/math-emu/fnmadds.c
index ee9d71e0b376..4db4b7d9ba8d 100644
--- a/arch/powerpc/math-emu/fnmadds.c
+++ b/arch/powerpc/math-emu/fnmadds.c
@@ -16,7 +16,6 @@ fnmadds(void *frD, void *frA, void *frB, void *frC)
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -34,12 +33,12 @@ fnmadds(void *frD, void *frA, void *frB, void *frC)
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
diff --git a/arch/powerpc/math-emu/fnmsub.c b/arch/powerpc/math-emu/fnmsub.c
index 3885a77acc93..f65979fa770e 100644
--- a/arch/powerpc/math-emu/fnmsub.c
+++ b/arch/powerpc/math-emu/fnmsub.c
@@ -15,7 +15,6 @@ fnmsub(void *frD, void *frA, void *frB, void *frC)
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -33,7 +32,7 @@ fnmsub(void *frD, void *frA, void *frB, void *frC)
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
@@ -41,7 +40,7 @@ fnmsub(void *frD, void *frA, void *frB, void *frC)
B_s ^= 1;
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
diff --git a/arch/powerpc/math-emu/fnmsubs.c b/arch/powerpc/math-emu/fnmsubs.c
index f835dfeb0fd1..9021dacc03b8 100644
--- a/arch/powerpc/math-emu/fnmsubs.c
+++ b/arch/powerpc/math-emu/fnmsubs.c
@@ -16,7 +16,6 @@ fnmsubs(void *frD, void *frA, void *frB, void *frC)
FP_DECL_D(C);
FP_DECL_D(T);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p %p\n", __func__, frD, frA, frB, frC);
@@ -34,7 +33,7 @@ fnmsubs(void *frD, void *frA, void *frB, void *frC)
if ((A_c == FP_CLS_INF && C_c == FP_CLS_ZERO) ||
(A_c == FP_CLS_ZERO && C_c == FP_CLS_INF))
- ret |= EFLAG_VXIMZ;
+ FP_SET_EXCEPTION(EFLAG_VXIMZ);
FP_MUL_D(T, A, C);
@@ -42,7 +41,7 @@ fnmsubs(void *frD, void *frA, void *frB, void *frC)
B_s ^= 1;
if (T_s != B_s && T_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, T, B);
diff --git a/arch/powerpc/math-emu/fsqrt.c b/arch/powerpc/math-emu/fsqrt.c
index 3e90072693a0..a55fc7d49983 100644
--- a/arch/powerpc/math-emu/fsqrt.c
+++ b/arch/powerpc/math-emu/fsqrt.c
@@ -12,7 +12,6 @@ fsqrt(void *frD, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p\n", __func__, frD, frB);
@@ -25,9 +24,9 @@ fsqrt(void *frD, void *frB)
#endif
if (B_s && B_c != FP_CLS_ZERO)
- ret |= EFLAG_VXSQRT;
+ FP_SET_EXCEPTION(EFLAG_VXSQRT);
if (B_c == FP_CLS_NAN)
- ret |= EFLAG_VXSNAN;
+ FP_SET_EXCEPTION(EFLAG_VXSNAN);
FP_SQRT_D(R, B);
diff --git a/arch/powerpc/math-emu/fsqrts.c b/arch/powerpc/math-emu/fsqrts.c
index 2843be986e2e..31dccbfc39ff 100644
--- a/arch/powerpc/math-emu/fsqrts.c
+++ b/arch/powerpc/math-emu/fsqrts.c
@@ -13,7 +13,6 @@ fsqrts(void *frD, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p\n", __func__, frD, frB);
@@ -26,9 +25,9 @@ fsqrts(void *frD, void *frB)
#endif
if (B_s && B_c != FP_CLS_ZERO)
- ret |= EFLAG_VXSQRT;
+ FP_SET_EXCEPTION(EFLAG_VXSQRT);
if (B_c == FP_CLS_NAN)
- ret |= EFLAG_VXSNAN;
+ FP_SET_EXCEPTION(EFLAG_VXSNAN);
FP_SQRT_D(R, B);
diff --git a/arch/powerpc/math-emu/fsub.c b/arch/powerpc/math-emu/fsub.c
index 78b09446a0e1..02c5dff458ba 100644
--- a/arch/powerpc/math-emu/fsub.c
+++ b/arch/powerpc/math-emu/fsub.c
@@ -13,7 +13,6 @@ fsub(void *frD, void *frA, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -31,7 +30,7 @@ fsub(void *frD, void *frA, void *frB)
B_s ^= 1;
if (A_s != B_s && A_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, A, B);
diff --git a/arch/powerpc/math-emu/fsubs.c b/arch/powerpc/math-emu/fsubs.c
index d3bf90863cf2..5d9b18c35e07 100644
--- a/arch/powerpc/math-emu/fsubs.c
+++ b/arch/powerpc/math-emu/fsubs.c
@@ -14,7 +14,6 @@ fsubs(void *frD, void *frA, void *frB)
FP_DECL_D(B);
FP_DECL_D(R);
FP_DECL_EX;
- int ret = 0;
#ifdef DEBUG
printk("%s: %p %p %p\n", __func__, frD, frA, frB);
@@ -32,7 +31,7 @@ fsubs(void *frD, void *frA, void *frB)
B_s ^= 1;
if (A_s != B_s && A_c == FP_CLS_INF && B_c == FP_CLS_INF)
- ret |= EFLAG_VXISI;
+ FP_SET_EXCEPTION(EFLAG_VXISI);
FP_ADD_D(R, A, B);
diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c
new file mode 100644
index 000000000000..41f4ef30e480
--- /dev/null
+++ b/arch/powerpc/math-emu/math_efp.c
@@ -0,0 +1,720 @@
+/*
+ * arch/powerpc/math-emu/math_efp.c
+ *
+ * Copyright (C) 2006-2008 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: Ebony Zhu, <ebony.zhu@freescale.com>
+ * Yu Liu, <yu.liu@freescale.com>
+ *
+ * Derived from arch/alpha/math-emu/math.c
+ * arch/powerpc/math-emu/math.c
+ *
+ * Description:
+ * This file is the exception handler to make E500 SPE instructions
+ * fully comply with the IEEE-754 floating point standard.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+
+#include <asm/uaccess.h>
+#include <asm/reg.h>
+
+#define FP_EX_BOOKE_E500_SPE
+#include <asm/sfp-machine.h>
+
+#include <math-emu/soft-fp.h>
+#include <math-emu/single.h>
+#include <math-emu/double.h>
+
+#define EFAPU 0x4
+
+#define VCT 0x4
+#define SPFP 0x6
+#define DPFP 0x7
+
+#define EFSADD 0x2c0
+#define EFSSUB 0x2c1
+#define EFSABS 0x2c4
+#define EFSNABS 0x2c5
+#define EFSNEG 0x2c6
+#define EFSMUL 0x2c8
+#define EFSDIV 0x2c9
+#define EFSCMPGT 0x2cc
+#define EFSCMPLT 0x2cd
+#define EFSCMPEQ 0x2ce
+#define EFSCFD 0x2cf
+#define EFSCFSI 0x2d1
+#define EFSCTUI 0x2d4
+#define EFSCTSI 0x2d5
+#define EFSCTUF 0x2d6
+#define EFSCTSF 0x2d7
+#define EFSCTUIZ 0x2d8
+#define EFSCTSIZ 0x2da
+
+#define EVFSADD 0x280
+#define EVFSSUB 0x281
+#define EVFSABS 0x284
+#define EVFSNABS 0x285
+#define EVFSNEG 0x286
+#define EVFSMUL 0x288
+#define EVFSDIV 0x289
+#define EVFSCMPGT 0x28c
+#define EVFSCMPLT 0x28d
+#define EVFSCMPEQ 0x28e
+#define EVFSCTUI 0x294
+#define EVFSCTSI 0x295
+#define EVFSCTUF 0x296
+#define EVFSCTSF 0x297
+#define EVFSCTUIZ 0x298
+#define EVFSCTSIZ 0x29a
+
+#define EFDADD 0x2e0
+#define EFDSUB 0x2e1
+#define EFDABS 0x2e4
+#define EFDNABS 0x2e5
+#define EFDNEG 0x2e6
+#define EFDMUL 0x2e8
+#define EFDDIV 0x2e9
+#define EFDCTUIDZ 0x2ea
+#define EFDCTSIDZ 0x2eb
+#define EFDCMPGT 0x2ec
+#define EFDCMPLT 0x2ed
+#define EFDCMPEQ 0x2ee
+#define EFDCFS 0x2ef
+#define EFDCTUI 0x2f4
+#define EFDCTSI 0x2f5
+#define EFDCTUF 0x2f6
+#define EFDCTSF 0x2f7
+#define EFDCTUIZ 0x2f8
+#define EFDCTSIZ 0x2fa
+
+#define AB 2
+#define XA 3
+#define XB 4
+#define XCR 5
+#define NOTYPE 0
+
+#define SIGN_BIT_S (1UL << 31)
+#define SIGN_BIT_D (1ULL << 63)
+#define FP_EX_MASK (FP_EX_INEXACT | FP_EX_INVALID | FP_EX_DIVZERO | \
+ FP_EX_UNDERFLOW | FP_EX_OVERFLOW)
+
+union dw_union {
+ u64 dp[1];
+ u32 wp[2];
+};
+
+static unsigned long insn_type(unsigned long speinsn)
+{
+ unsigned long ret = NOTYPE;
+
+ switch (speinsn & 0x7ff) {
+ case EFSABS: ret = XA; break;
+ case EFSADD: ret = AB; break;
+ case EFSCFD: ret = XB; break;
+ case EFSCMPEQ: ret = XCR; break;
+ case EFSCMPGT: ret = XCR; break;
+ case EFSCMPLT: ret = XCR; break;
+ case EFSCTSF: ret = XB; break;
+ case EFSCTSI: ret = XB; break;
+ case EFSCTSIZ: ret = XB; break;
+ case EFSCTUF: ret = XB; break;
+ case EFSCTUI: ret = XB; break;
+ case EFSCTUIZ: ret = XB; break;
+ case EFSDIV: ret = AB; break;
+ case EFSMUL: ret = AB; break;
+ case EFSNABS: ret = XA; break;
+ case EFSNEG: ret = XA; break;
+ case EFSSUB: ret = AB; break;
+ case EFSCFSI: ret = XB; break;
+
+ case EVFSABS: ret = XA; break;
+ case EVFSADD: ret = AB; break;
+ case EVFSCMPEQ: ret = XCR; break;
+ case EVFSCMPGT: ret = XCR; break;
+ case EVFSCMPLT: ret = XCR; break;
+ case EVFSCTSF: ret = XB; break;
+ case EVFSCTSI: ret = XB; break;
+ case EVFSCTSIZ: ret = XB; break;
+ case EVFSCTUF: ret = XB; break;
+ case EVFSCTUI: ret = XB; break;
+ case EVFSCTUIZ: ret = XB; break;
+ case EVFSDIV: ret = AB; break;
+ case EVFSMUL: ret = AB; break;
+ case EVFSNABS: ret = XA; break;
+ case EVFSNEG: ret = XA; break;
+ case EVFSSUB: ret = AB; break;
+
+ case EFDABS: ret = XA; break;
+ case EFDADD: ret = AB; break;
+ case EFDCFS: ret = XB; break;
+ case EFDCMPEQ: ret = XCR; break;
+ case EFDCMPGT: ret = XCR; break;
+ case EFDCMPLT: ret = XCR; break;
+ case EFDCTSF: ret = XB; break;
+ case EFDCTSI: ret = XB; break;
+ case EFDCTSIDZ: ret = XB; break;
+ case EFDCTSIZ: ret = XB; break;
+ case EFDCTUF: ret = XB; break;
+ case EFDCTUI: ret = XB; break;
+ case EFDCTUIDZ: ret = XB; break;
+ case EFDCTUIZ: ret = XB; break;
+ case EFDDIV: ret = AB; break;
+ case EFDMUL: ret = AB; break;
+ case EFDNABS: ret = XA; break;
+ case EFDNEG: ret = XA; break;
+ case EFDSUB: ret = AB; break;
+
+ default:
+		printk(KERN_ERR "\nOops! No type found for SPE instruction.");
+ printk(KERN_ERR "\ninst code: %08lx\n", speinsn);
+ }
+
+ return ret;
+}
+
+int do_spe_mathemu(struct pt_regs *regs)
+{
+ FP_DECL_EX;
+ int IR, cmp;
+
+ unsigned long type, func, fc, fa, fb, src, speinsn;
+ union dw_union vc, va, vb;
+
+ if (get_user(speinsn, (unsigned int __user *) regs->nip))
+ return -EFAULT;
+ if ((speinsn >> 26) != EFAPU)
+ return -EINVAL; /* not an spe instruction */
+
+ type = insn_type(speinsn);
+ if (type == NOTYPE)
+ return -ENOSYS;
+
+ func = speinsn & 0x7ff;
+ fc = (speinsn >> 21) & 0x1f;
+ fa = (speinsn >> 16) & 0x1f;
+ fb = (speinsn >> 11) & 0x1f;
+ src = (speinsn >> 5) & 0x7;
+
+ vc.wp[0] = current->thread.evr[fc];
+ vc.wp[1] = regs->gpr[fc];
+ va.wp[0] = current->thread.evr[fa];
+ va.wp[1] = regs->gpr[fa];
+ vb.wp[0] = current->thread.evr[fb];
+ vb.wp[1] = regs->gpr[fb];
+
+ __FPU_FPSCR = mfspr(SPRN_SPEFSCR);
+
+#ifdef DEBUG
+ printk("speinsn:%08lx spefscr:%08lx\n", speinsn, __FPU_FPSCR);
+ printk("vc: %08x %08x\n", vc.wp[0], vc.wp[1]);
+ printk("va: %08x %08x\n", va.wp[0], va.wp[1]);
+ printk("vb: %08x %08x\n", vb.wp[0], vb.wp[1]);
+#endif
+
+ switch (src) {
+ case SPFP: {
+ FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
+
+ switch (type) {
+ case AB:
+ case XCR:
+ FP_UNPACK_SP(SA, va.wp + 1);
+ case XB:
+ FP_UNPACK_SP(SB, vb.wp + 1);
+ break;
+ case XA:
+ FP_UNPACK_SP(SA, va.wp + 1);
+ break;
+ }
+
+#ifdef DEBUG
+ printk("SA: %ld %08lx %ld (%ld)\n", SA_s, SA_f, SA_e, SA_c);
+ printk("SB: %ld %08lx %ld (%ld)\n", SB_s, SB_f, SB_e, SB_c);
+#endif
+
+ switch (func) {
+ case EFSABS:
+ vc.wp[1] = va.wp[1] & ~SIGN_BIT_S;
+ goto update_regs;
+
+ case EFSNABS:
+ vc.wp[1] = va.wp[1] | SIGN_BIT_S;
+ goto update_regs;
+
+ case EFSNEG:
+ vc.wp[1] = va.wp[1] ^ SIGN_BIT_S;
+ goto update_regs;
+
+ case EFSADD:
+ FP_ADD_S(SR, SA, SB);
+ goto pack_s;
+
+ case EFSSUB:
+ FP_SUB_S(SR, SA, SB);
+ goto pack_s;
+
+ case EFSMUL:
+ FP_MUL_S(SR, SA, SB);
+ goto pack_s;
+
+ case EFSDIV:
+ FP_DIV_S(SR, SA, SB);
+ goto pack_s;
+
+ case EFSCMPEQ:
+ cmp = 0;
+ goto cmp_s;
+
+ case EFSCMPGT:
+ cmp = 1;
+ goto cmp_s;
+
+ case EFSCMPLT:
+ cmp = -1;
+ goto cmp_s;
+
+ case EFSCTSF:
+ case EFSCTUF:
+ if (!((vb.wp[1] >> 23) == 0xff && ((vb.wp[1] & 0x7fffff) > 0))) {
+			/* not a NaN */
+ if (((vb.wp[1] >> 23) & 0xff) == 0) {
+ /* denorm */
+ vc.wp[1] = 0x0;
+ } else if ((vb.wp[1] >> 31) == 0) {
+ /* positive normal */
+ vc.wp[1] = (func == EFSCTSF) ?
+ 0x7fffffff : 0xffffffff;
+ } else { /* negative normal */
+ vc.wp[1] = (func == EFSCTSF) ?
+ 0x80000000 : 0x0;
+ }
+ } else { /* rB is NaN */
+ vc.wp[1] = 0x0;
+ }
+ goto update_regs;
+
+ case EFSCFD: {
+ FP_DECL_D(DB);
+ FP_CLEAR_EXCEPTIONS;
+ FP_UNPACK_DP(DB, vb.dp);
+#ifdef DEBUG
+ printk("DB: %ld %08lx %08lx %ld (%ld)\n",
+ DB_s, DB_f1, DB_f0, DB_e, DB_c);
+#endif
+ FP_CONV(S, D, 1, 2, SR, DB);
+ goto pack_s;
+ }
+
+ case EFSCTSI:
+ case EFSCTSIZ:
+ case EFSCTUI:
+ case EFSCTUIZ:
+ if (func & 0x4) {
+ _FP_ROUND(1, SB);
+ } else {
+ _FP_ROUND_ZERO(1, SB);
+ }
+ FP_TO_INT_S(vc.wp[1], SB, 32, ((func & 0x3) != 0));
+ goto update_regs;
+
+ default:
+ goto illegal;
+ }
+ break;
+
+pack_s:
+#ifdef DEBUG
+ printk("SR: %ld %08lx %ld (%ld)\n", SR_s, SR_f, SR_e, SR_c);
+#endif
+ FP_PACK_SP(vc.wp + 1, SR);
+ goto update_regs;
+
+cmp_s:
+ FP_CMP_S(IR, SA, SB, 3);
+ if (IR == 3 && (FP_ISSIGNAN_S(SA) || FP_ISSIGNAN_S(SB)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ if (IR == cmp) {
+ IR = 0x4;
+ } else {
+ IR = 0;
+ }
+ goto update_ccr;
+ }
+
+ case DPFP: {
+ FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
+
+ switch (type) {
+ case AB:
+ case XCR:
+ FP_UNPACK_DP(DA, va.dp);
+ case XB:
+ FP_UNPACK_DP(DB, vb.dp);
+ break;
+ case XA:
+ FP_UNPACK_DP(DA, va.dp);
+ break;
+ }
+
+#ifdef DEBUG
+ printk("DA: %ld %08lx %08lx %ld (%ld)\n",
+ DA_s, DA_f1, DA_f0, DA_e, DA_c);
+ printk("DB: %ld %08lx %08lx %ld (%ld)\n",
+ DB_s, DB_f1, DB_f0, DB_e, DB_c);
+#endif
+
+ switch (func) {
+ case EFDABS:
+ vc.dp[0] = va.dp[0] & ~SIGN_BIT_D;
+ goto update_regs;
+
+ case EFDNABS:
+ vc.dp[0] = va.dp[0] | SIGN_BIT_D;
+ goto update_regs;
+
+ case EFDNEG:
+ vc.dp[0] = va.dp[0] ^ SIGN_BIT_D;
+ goto update_regs;
+
+ case EFDADD:
+ FP_ADD_D(DR, DA, DB);
+ goto pack_d;
+
+ case EFDSUB:
+ FP_SUB_D(DR, DA, DB);
+ goto pack_d;
+
+ case EFDMUL:
+ FP_MUL_D(DR, DA, DB);
+ goto pack_d;
+
+ case EFDDIV:
+ FP_DIV_D(DR, DA, DB);
+ goto pack_d;
+
+ case EFDCMPEQ:
+ cmp = 0;
+ goto cmp_d;
+
+ case EFDCMPGT:
+ cmp = 1;
+ goto cmp_d;
+
+ case EFDCMPLT:
+ cmp = -1;
+ goto cmp_d;
+
+ case EFDCTSF:
+ case EFDCTUF:
+ if (!((vb.wp[0] >> 20) == 0x7ff &&
+ ((vb.wp[0] & 0xfffff) > 0 || (vb.wp[1] > 0)))) {
+ /* not a NaN */
+ if (((vb.wp[0] >> 20) & 0x7ff) == 0) {
+ /* denorm */
+ vc.wp[1] = 0x0;
+ } else if ((vb.wp[0] >> 31) == 0) {
+ /* positive normal */
+ vc.wp[1] = (func == EFDCTSF) ?
+ 0x7fffffff : 0xffffffff;
+ } else { /* negative normal */
+ vc.wp[1] = (func == EFDCTSF) ?
+ 0x80000000 : 0x0;
+ }
+ } else { /* NaN */
+ vc.wp[1] = 0x0;
+ }
+ goto update_regs;
+
+ case EFDCFS: {
+ FP_DECL_S(SB);
+ FP_CLEAR_EXCEPTIONS;
+ FP_UNPACK_SP(SB, vb.wp + 1);
+#ifdef DEBUG
+ printk("SB: %ld %08lx %ld (%ld)\n",
+ SB_s, SB_f, SB_e, SB_c);
+#endif
+ FP_CONV(D, S, 2, 1, DR, SB);
+ goto pack_d;
+ }
+
+ case EFDCTUIDZ:
+ case EFDCTSIDZ:
+ _FP_ROUND_ZERO(2, DB);
+ FP_TO_INT_D(vc.dp[0], DB, 64, ((func & 0x1) == 0));
+ goto update_regs;
+
+ case EFDCTUI:
+ case EFDCTSI:
+ case EFDCTUIZ:
+ case EFDCTSIZ:
+ if (func & 0x4) {
+ _FP_ROUND(2, DB);
+ } else {
+ _FP_ROUND_ZERO(2, DB);
+ }
+ FP_TO_INT_D(vc.wp[1], DB, 32, ((func & 0x3) != 0));
+ goto update_regs;
+
+ default:
+ goto illegal;
+ }
+ break;
+
+pack_d:
+#ifdef DEBUG
+ printk("DR: %ld %08lx %08lx %ld (%ld)\n",
+ DR_s, DR_f1, DR_f0, DR_e, DR_c);
+#endif
+ FP_PACK_DP(vc.dp, DR);
+ goto update_regs;
+
+cmp_d:
+ FP_CMP_D(IR, DA, DB, 3);
+ if (IR == 3 && (FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ if (IR == cmp) {
+ IR = 0x4;
+ } else {
+ IR = 0;
+ }
+ goto update_ccr;
+
+ }
+
+ case VCT: {
+ FP_DECL_S(SA0); FP_DECL_S(SB0); FP_DECL_S(SR0);
+ FP_DECL_S(SA1); FP_DECL_S(SB1); FP_DECL_S(SR1);
+ int IR0, IR1;
+
+ switch (type) {
+ case AB:
+ case XCR:
+ FP_UNPACK_SP(SA0, va.wp);
+ FP_UNPACK_SP(SA1, va.wp + 1);
+ case XB:
+ FP_UNPACK_SP(SB0, vb.wp);
+ FP_UNPACK_SP(SB1, vb.wp + 1);
+ break;
+ case XA:
+ FP_UNPACK_SP(SA0, va.wp);
+ FP_UNPACK_SP(SA1, va.wp + 1);
+ break;
+ }
+
+#ifdef DEBUG
+ printk("SA0: %ld %08lx %ld (%ld)\n", SA0_s, SA0_f, SA0_e, SA0_c);
+ printk("SA1: %ld %08lx %ld (%ld)\n", SA1_s, SA1_f, SA1_e, SA1_c);
+ printk("SB0: %ld %08lx %ld (%ld)\n", SB0_s, SB0_f, SB0_e, SB0_c);
+ printk("SB1: %ld %08lx %ld (%ld)\n", SB1_s, SB1_f, SB1_e, SB1_c);
+#endif
+
+ switch (func) {
+ case EVFSABS:
+ vc.wp[0] = va.wp[0] & ~SIGN_BIT_S;
+ vc.wp[1] = va.wp[1] & ~SIGN_BIT_S;
+ goto update_regs;
+
+ case EVFSNABS:
+ vc.wp[0] = va.wp[0] | SIGN_BIT_S;
+ vc.wp[1] = va.wp[1] | SIGN_BIT_S;
+ goto update_regs;
+
+ case EVFSNEG:
+ vc.wp[0] = va.wp[0] ^ SIGN_BIT_S;
+ vc.wp[1] = va.wp[1] ^ SIGN_BIT_S;
+ goto update_regs;
+
+ case EVFSADD:
+ FP_ADD_S(SR0, SA0, SB0);
+ FP_ADD_S(SR1, SA1, SB1);
+ goto pack_vs;
+
+ case EVFSSUB:
+ FP_SUB_S(SR0, SA0, SB0);
+ FP_SUB_S(SR1, SA1, SB1);
+ goto pack_vs;
+
+ case EVFSMUL:
+ FP_MUL_S(SR0, SA0, SB0);
+ FP_MUL_S(SR1, SA1, SB1);
+ goto pack_vs;
+
+ case EVFSDIV:
+ FP_DIV_S(SR0, SA0, SB0);
+ FP_DIV_S(SR1, SA1, SB1);
+ goto pack_vs;
+
+ case EVFSCMPEQ:
+ cmp = 0;
+ goto cmp_vs;
+
+ case EVFSCMPGT:
+ cmp = 1;
+ goto cmp_vs;
+
+ case EVFSCMPLT:
+ cmp = -1;
+ goto cmp_vs;
+
+ case EVFSCTSF:
+ __asm__ __volatile__ ("mtspr 512, %4\n"
+ "efsctsf %0, %2\n"
+ "efsctsf %1, %3\n"
+ : "=r" (vc.wp[0]), "=r" (vc.wp[1])
+ : "r" (vb.wp[0]), "r" (vb.wp[1]), "r" (0));
+ goto update_regs;
+
+ case EVFSCTUF:
+ __asm__ __volatile__ ("mtspr 512, %4\n"
+ "efsctuf %0, %2\n"
+ "efsctuf %1, %3\n"
+ : "=r" (vc.wp[0]), "=r" (vc.wp[1])
+ : "r" (vb.wp[0]), "r" (vb.wp[1]), "r" (0));
+ goto update_regs;
+
+ case EVFSCTUI:
+ case EVFSCTSI:
+ case EVFSCTUIZ:
+ case EVFSCTSIZ:
+ if (func & 0x4) {
+ _FP_ROUND(1, SB0);
+ _FP_ROUND(1, SB1);
+ } else {
+ _FP_ROUND_ZERO(1, SB0);
+ _FP_ROUND_ZERO(1, SB1);
+ }
+ FP_TO_INT_S(vc.wp[0], SB0, 32, ((func & 0x3) != 0));
+ FP_TO_INT_S(vc.wp[1], SB1, 32, ((func & 0x3) != 0));
+ goto update_regs;
+
+ default:
+ goto illegal;
+ }
+ break;
+
+pack_vs:
+#ifdef DEBUG
+ printk("SR0: %ld %08lx %ld (%ld)\n", SR0_s, SR0_f, SR0_e, SR0_c);
+ printk("SR1: %ld %08lx %ld (%ld)\n", SR1_s, SR1_f, SR1_e, SR1_c);
+#endif
+ FP_PACK_SP(vc.wp, SR0);
+ FP_PACK_SP(vc.wp + 1, SR1);
+ goto update_regs;
+
+cmp_vs:
+ {
+ int ch, cl;
+
+ FP_CMP_S(IR0, SA0, SB0, 3);
+ FP_CMP_S(IR1, SA1, SB1, 3);
+ if (IR0 == 3 && (FP_ISSIGNAN_S(SA0) || FP_ISSIGNAN_S(SB0)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ if (IR1 == 3 && (FP_ISSIGNAN_S(SA1) || FP_ISSIGNAN_S(SB1)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ ch = (IR0 == cmp) ? 1 : 0;
+ cl = (IR1 == cmp) ? 1 : 0;
+ IR = (ch << 3) | (cl << 2) | ((ch | cl) << 1) |
+ ((ch & cl) << 0);
+ goto update_ccr;
+ }
+ }
+ default:
+ return -EINVAL;
+ }
+
+update_ccr:
+ regs->ccr &= ~(15 << ((7 - ((speinsn >> 23) & 0x7)) << 2));
+ regs->ccr |= (IR << ((7 - ((speinsn >> 23) & 0x7)) << 2));
+
+update_regs:
+ __FPU_FPSCR &= ~FP_EX_MASK;
+ __FPU_FPSCR |= (FP_CUR_EXCEPTIONS & FP_EX_MASK);
+ mtspr(SPRN_SPEFSCR, __FPU_FPSCR);
+
+ current->thread.evr[fc] = vc.wp[0];
+ regs->gpr[fc] = vc.wp[1];
+
+#ifdef DEBUG
+ printk("ccr = %08lx\n", regs->ccr);
+ printk("cur exceptions = %08x spefscr = %08lx\n",
+ FP_CUR_EXCEPTIONS, __FPU_FPSCR);
+ printk("vc: %08x %08x\n", vc.wp[0], vc.wp[1]);
+ printk("va: %08x %08x\n", va.wp[0], va.wp[1]);
+ printk("vb: %08x %08x\n", vb.wp[0], vb.wp[1]);
+#endif
+
+ return 0;
+
+illegal:
+	printk(KERN_ERR "\nOops! IEEE-754 compliance handler encountered an unsupported instruction.\ninst code: %08lx\n", speinsn);
+ return -ENOSYS;
+}
+
+int speround_handler(struct pt_regs *regs)
+{
+ union dw_union fgpr;
+ int s_lo, s_hi;
+ unsigned long speinsn, type, fc;
+
+ if (get_user(speinsn, (unsigned int __user *) regs->nip))
+ return -EFAULT;
+ if ((speinsn >> 26) != 4)
+ return -EINVAL; /* not an spe instruction */
+
+ type = insn_type(speinsn & 0x7ff);
+ if (type == XCR) return -ENOSYS;
+
+ fc = (speinsn >> 21) & 0x1f;
+ s_lo = regs->gpr[fc] & SIGN_BIT_S;
+ s_hi = current->thread.evr[fc] & SIGN_BIT_S;
+ fgpr.wp[0] = current->thread.evr[fc];
+ fgpr.wp[1] = regs->gpr[fc];
+
+ __FPU_FPSCR = mfspr(SPRN_SPEFSCR);
+
+ switch ((speinsn >> 5) & 0x7) {
+	/* Since SPE instructions on the E500 core handle round-to-nearest
+	 * and round-toward-zero in an IEEE-754 compliant way, we only need
+	 * to handle round toward +Inf and round toward -Inf in software.
+ */
+ case SPFP:
+ if ((FP_ROUNDMODE) == FP_RND_PINF) {
+ if (!s_lo) fgpr.wp[1]++; /* Z > 0, choose Z1 */
+ } else { /* round to -Inf */
+ if (s_lo) fgpr.wp[1]++; /* Z < 0, choose Z2 */
+ }
+ break;
+
+ case DPFP:
+ if (FP_ROUNDMODE == FP_RND_PINF) {
+ if (!s_hi) fgpr.dp[0]++; /* Z > 0, choose Z1 */
+ } else { /* round to -Inf */
+ if (s_hi) fgpr.dp[0]++; /* Z < 0, choose Z2 */
+ }
+ break;
+
+ case VCT:
+ if (FP_ROUNDMODE == FP_RND_PINF) {
+ if (!s_lo) fgpr.wp[1]++; /* Z_low > 0, choose Z1 */
+ if (!s_hi) fgpr.wp[0]++; /* Z_high word > 0, choose Z1 */
+ } else { /* round to -Inf */
+ if (s_lo) fgpr.wp[1]++; /* Z_low < 0, choose Z2 */
+ if (s_hi) fgpr.wp[0]++; /* Z_high < 0, choose Z2 */
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ current->thread.evr[fc] = fgpr.wp[0];
+ regs->gpr[fc] = fgpr.wp[1];
+
+ return 0;
+}
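
In do_spe_mathemu() above, the update_ccr path writes the 4-bit comparison result IR into the CR field selected by bits 23-25 of the instruction word (crfD), with CR0 occupying the most-significant nibble of the 32-bit CCR — hence the (7 - crfD) * 4 shift. A standalone sketch of that field arithmetic (the helper name is made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch of the update_ccr math in do_spe_mathemu(): crfD
 * selects one of eight 4-bit CR fields, CR0 being the most-significant
 * nibble of the CCR, so the shift amount is (7 - crfD) * 4.
 */
static uint32_t set_cr_field(uint32_t ccr, uint32_t speinsn, uint32_t ir)
{
	unsigned int crfd = (speinsn >> 23) & 0x7;
	unsigned int shift = (7 - crfd) << 2;

	ccr &= ~(0xfu << shift);	/* clear the old field */
	ccr |= (ir & 0xf) << shift;	/* install the new result */
	return ccr;
}

int main(void)
{
	/* crfD = 1, IR = 0x4 ("comparison true") lands in bits 24-27 */
	printf("%08x\n", set_cr_field(0, 1u << 23, 0x4));
	return 0;
}
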
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index e7392b45a5ef..953cc4a1cde5 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -6,17 +6,19 @@ ifeq ($(CONFIG_PPC64),y)
EXTRA_CFLAGS += -mno-minimal-toc
endif
-obj-y := fault.o mem.o \
+obj-y := fault.o mem.o pgtable.o \
init_$(CONFIG_WORD_SIZE).o \
- pgtable_$(CONFIG_WORD_SIZE).o \
- mmu_context_$(CONFIG_WORD_SIZE).o
+ pgtable_$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
+ tlb_nohash_low.o
hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o
obj-$(CONFIG_PPC64) += hash_utils_64.o \
slb_low.o slb.o stab.o \
gup.o mmap.o $(hash-y)
obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o
obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \
- tlb_$(CONFIG_WORD_SIZE).o
+ tlb_hash$(CONFIG_WORD_SIZE).o \
+ mmu_context_hash$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_40x) += 40x_mmu.o
obj-$(CONFIG_44x) += 44x_mmu.o
obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 866098686da8..91c7b8636b8a 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -30,6 +30,7 @@
#include <linux/kprobes.h>
#include <linux/kdebug.h>
+#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
@@ -283,7 +284,7 @@ good_area:
}
pte_update(ptep, 0, _PAGE_HWEXEC |
_PAGE_ACCESSED);
- _tlbie(address, mm->context.id);
+ local_flush_tlb_page(vma, address);
pte_unmap_unlock(ptep, ptl);
up_read(&mm->mmap_sem);
return 0;
@@ -318,9 +319,16 @@ good_area:
goto do_sigbus;
BUG();
}
- if (ret & VM_FAULT_MAJOR)
+ if (ret & VM_FAULT_MAJOR) {
current->maj_flt++;
- else
+#ifdef CONFIG_PPC_SMLPAR
+ if (firmware_has_feature(FW_FEATURE_CMO)) {
+ preempt_disable();
+ get_lppaca()->page_ins += (1 << PAGE_FACTOR);
+ preempt_enable();
+ }
+#endif
+ } else
current->min_flt++;
up_read(&mm->mmap_sem);
return 0;
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 7bffb70b9fe2..67850ec9feb3 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -36,36 +36,6 @@ mmu_hash_lock:
#endif /* CONFIG_SMP */
/*
- * Sync CPUs with hash_page taking & releasing the hash
- * table lock
- */
-#ifdef CONFIG_SMP
- .text
-_GLOBAL(hash_page_sync)
- mfmsr r10
- rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
- mtmsr r0
- lis r8,mmu_hash_lock@h
- ori r8,r8,mmu_hash_lock@l
- lis r0,0x0fff
- b 10f
-11: lwz r6,0(r8)
- cmpwi 0,r6,0
- bne 11b
-10: lwarx r6,0,r8
- cmpwi 0,r6,0
- bne- 11b
- stwcx. r0,0,r8
- bne- 10b
- isync
- eieio
- li r0,0
- stw r0,0(r8)
- mtmsr r10
- blr
-#endif /* CONFIG_SMP */
-
-/*
* Load a PTE into the hash table, if possible.
* The address is in r4, and r3 contains an access flag:
* _PAGE_RW (0x400) if a write.
@@ -353,8 +323,8 @@ _GLOBAL(create_hpte)
ori r8,r8,0xe14 /* clear out reserved bits and M */
andc r8,r5,r8 /* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
- ori r8,r8,_PAGE_COHERENT /* set M (coherence required) */
-END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
+ rlwinm r8,r8,0,~_PAGE_COHERENT /* clear M (coherence not required) */
+END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
#ifdef CONFIG_PTE_64BIT
/* Put the XPN bits into the PTE */
rlwimi r8,r10,8,20,22
@@ -663,3 +633,80 @@ _GLOBAL(flush_hash_patch_B)
SYNC_601
isync
blr
+
+/*
+ * Flush an entry from the TLB
+ */
+_GLOBAL(_tlbie)
+#ifdef CONFIG_SMP
+ rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
+ lwz r8,TI_CPU(r8)
+ oris r8,r8,11
+ mfmsr r10
+ SYNC
+ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
+ rlwinm r0,r0,0,28,26 /* clear DR */
+ mtmsr r0
+ SYNC_601
+ isync
+ lis r9,mmu_hash_lock@h
+ ori r9,r9,mmu_hash_lock@l
+ tophys(r9,r9)
+10: lwarx r7,0,r9
+ cmpwi 0,r7,0
+ bne- 10b
+ stwcx. r8,0,r9
+ bne- 10b
+ eieio
+ tlbie r3
+ sync
+ TLBSYNC
+ li r0,0
+ stw r0,0(r9) /* clear mmu_hash_lock */
+ mtmsr r10
+ SYNC_601
+ isync
+#else /* CONFIG_SMP */
+ tlbie r3
+ sync
+#endif /* CONFIG_SMP */
+ blr
+
+/*
+ * Flush the entire TLB. 603/603e only
+ */
+_GLOBAL(_tlbia)
+#if defined(CONFIG_SMP)
+ rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
+ lwz r8,TI_CPU(r8)
+ oris r8,r8,10
+ mfmsr r10
+ SYNC
+ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
+ rlwinm r0,r0,0,28,26 /* clear DR */
+ mtmsr r0
+ SYNC_601
+ isync
+ lis r9,mmu_hash_lock@h
+ ori r9,r9,mmu_hash_lock@l
+ tophys(r9,r9)
+10: lwarx r7,0,r9
+ cmpwi 0,r7,0
+ bne- 10b
+ stwcx. r8,0,r9
+ bne- 10b
+ sync
+ tlbia
+ sync
+ TLBSYNC
+ li r0,0
+ stw r0,0(r9) /* clear mmu_hash_lock */
+ mtmsr r10
+ SYNC_601
+ isync
+#else /* CONFIG_SMP */
+ sync
+ tlbia
+ sync
+#endif /* CONFIG_SMP */
+ blr
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index f0c3b88d50fa..201c7a5486cb 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -53,8 +53,7 @@ unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */
/* Subtract one from array size because we don't need a cache for 4K since
* it is not a huge page size */
-#define huge_pgtable_cache(psize) (pgtable_cache[HUGEPTE_CACHE_NUM \
- + psize-1])
+#define HUGE_PGTABLE_INDEX(psize) (HUGEPTE_CACHE_NUM + psize - 1)
#define HUGEPTE_CACHE_NAME(psize) (huge_pgtable_cache_name[psize])
static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = {
@@ -113,7 +112,7 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
unsigned long address, unsigned int psize)
{
- pte_t *new = kmem_cache_zalloc(huge_pgtable_cache(psize),
+ pte_t *new = kmem_cache_zalloc(pgtable_cache[HUGE_PGTABLE_INDEX(psize)],
GFP_KERNEL|__GFP_REPEAT);
if (! new)
@@ -121,7 +120,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
spin_lock(&mm->page_table_lock);
if (!hugepd_none(*hpdp))
- kmem_cache_free(huge_pgtable_cache(psize), new);
+ kmem_cache_free(pgtable_cache[HUGE_PGTABLE_INDEX(psize)], new);
else
hpdp->pd = (unsigned long)new | HUGEPD_OK;
spin_unlock(&mm->page_table_lock);
@@ -763,13 +762,14 @@ static int __init hugetlbpage_init(void)
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
if (mmu_huge_psizes[psize]) {
- huge_pgtable_cache(psize) = kmem_cache_create(
- HUGEPTE_CACHE_NAME(psize),
- HUGEPTE_TABLE_SIZE(psize),
- HUGEPTE_TABLE_SIZE(psize),
- 0,
- NULL);
- if (!huge_pgtable_cache(psize))
+ pgtable_cache[HUGE_PGTABLE_INDEX(psize)] =
+ kmem_cache_create(
+ HUGEPTE_CACHE_NAME(psize),
+ HUGEPTE_TABLE_SIZE(psize),
+ HUGEPTE_TABLE_SIZE(psize),
+ 0,
+ NULL);
+ if (!pgtable_cache[HUGE_PGTABLE_INDEX(psize)])
panic("hugetlbpage_init(): could not create %s"\
"\n", HUGEPTE_CACHE_NAME(psize));
}
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 388ceda632f3..666a5e8a5be1 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -35,7 +35,6 @@
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
-#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
@@ -49,7 +48,7 @@
#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
/* The ammount of lowmem must be within 0xF0000000 - KERNELBASE. */
-#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE))
+#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET))
#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
#endif
#endif
@@ -180,9 +179,6 @@ void __init MMU_init(void)
if (ppc_md.progress)
ppc_md.progress("MMU:setio", 0x302);
- /* Initialize the context management stuff */
- mmu_context_init();
-
if (ppc_md.progress)
ppc_md.progress("MMU:exit", 0x211);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b9e1a1da6e52..53b06ebb3f2f 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -102,8 +102,8 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
if (!page_is_ram(pfn))
- vma_prot = __pgprot(pgprot_val(vma_prot)
- | _PAGE_GUARDED | _PAGE_NO_CACHE);
+ vma_prot = pgprot_noncached(vma_prot);
+
return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
@@ -488,7 +488,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
* we invalidate the TLB here, thus avoiding dcbst
* misbehaviour.
*/
- _tlbie(address, 0 /* 8xx doesn't care about PID */);
+ _tlbil_va(address, 0 /* 8xx doesn't care about PID */);
#endif
/* The _PAGE_USER test should really be _PAGE_EXEC, but
* older glibc versions execute some code from no-exec
diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c
deleted file mode 100644
index cc32ba41d900..000000000000
--- a/arch/powerpc/mm/mmu_context_32.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * This file contains the routines for handling the MMU on those
- * PowerPC implementations where the MMU substantially follows the
- * architecture specification. This includes the 6xx, 7xx, 7xxx,
- * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
- * -- paulus
- *
- * Derived from arch/ppc/mm/init.c:
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
- * and Cort Dougan (PReP) (cort@cs.nmt.edu)
- * Copyright (C) 1996 Paul Mackerras
- *
- * Derived from "arch/i386/mm/init.c"
- * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/mm.h>
-#include <linux/init.h>
-
-#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
-
-unsigned long next_mmu_context;
-unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
-#ifdef FEW_CONTEXTS
-atomic_t nr_free_contexts;
-struct mm_struct *context_mm[LAST_CONTEXT+1];
-void steal_context(void);
-#endif /* FEW_CONTEXTS */
-
-/*
- * Initialize the context management stuff.
- */
-void __init
-mmu_context_init(void)
-{
- /*
- * Some processors have too few contexts to reserve one for
- * init_mm, and require using context 0 for a normal task.
- * Other processors reserve the use of context zero for the kernel.
- * This code assumes FIRST_CONTEXT < 32.
- */
- context_map[0] = (1 << FIRST_CONTEXT) - 1;
- next_mmu_context = FIRST_CONTEXT;
-#ifdef FEW_CONTEXTS
- atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
-#endif /* FEW_CONTEXTS */
-}
-
-#ifdef FEW_CONTEXTS
-/*
- * Steal a context from a task that has one at the moment.
- * This is only used on 8xx and 4xx and we presently assume that
- * they don't do SMP. If they do then this will have to check
- * whether the MM we steal is in use.
- * We also assume that this is only used on systems that don't
- * use an MMU hash table - this is true for 8xx and 4xx.
- * This isn't an LRU system, it just frees up each context in
- * turn (sort-of pseudo-random replacement :). This would be the
- * place to implement an LRU scheme if anyone was motivated to do it.
- * -- paulus
- */
-void
-steal_context(void)
-{
- struct mm_struct *mm;
-
- /* free up context `next_mmu_context' */
- /* if we shouldn't free context 0, don't... */
- if (next_mmu_context < FIRST_CONTEXT)
- next_mmu_context = FIRST_CONTEXT;
- mm = context_mm[next_mmu_context];
- flush_tlb_mm(mm);
- destroy_context(mm);
-}
-#endif /* FEW_CONTEXTS */
diff --git a/arch/powerpc/mm/mmu_context_hash32.c b/arch/powerpc/mm/mmu_context_hash32.c
new file mode 100644
index 000000000000..0dfba2bf7f31
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context_hash32.c
@@ -0,0 +1,103 @@
+/*
+ * This file contains the routines for handling the MMU on those
+ * PowerPC implementations where the MMU substantially follows the
+ * architecture specification. This includes the 6xx, 7xx, 7xxx,
+ * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
+ * -- paulus
+ *
+ * Derived from arch/ppc/mm/init.c:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+/*
+ * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
+ * (virtual segment identifiers) for each context. Although the
+ * hardware supports 24-bit VSIDs, and thus >1 million contexts,
+ * we only use 32,768 of them. That is ample, since there can be
+ * at most around 30,000 tasks in the system anyway, and it means
+ * that we can use a bitmap to indicate which contexts are in use.
+ * Using a bitmap means that we entirely avoid all of the problems
+ * that we used to have when the context number overflowed,
+ * particularly on SMP systems.
+ * -- paulus.
+ */
+#define NO_CONTEXT ((unsigned long) -1)
+#define LAST_CONTEXT 32767
+#define FIRST_CONTEXT 1
+
+/*
+ * This function defines the mapping from contexts to VSIDs (virtual
+ * segment IDs). We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table. Note, if this
+ * function is changed then arch/ppc/mm/hashtable.S will have to be
+ * changed to correspond.
+ *
+ *
+ * CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
+ * & 0xffffff)
+ */
+
+static unsigned long next_mmu_context;
+static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+
+
+/*
+ * Set up the context for a new address space.
+ */
+int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+ unsigned long ctx = next_mmu_context;
+
+ while (test_and_set_bit(ctx, context_map)) {
+ ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
+ if (ctx > LAST_CONTEXT)
+ ctx = 0;
+ }
+ next_mmu_context = (ctx + 1) & LAST_CONTEXT;
+ mm->context.id = ctx;
+
+ return 0;
+}
+
+/*
+ * We're finished using the context for an address space.
+ */
+void destroy_context(struct mm_struct *mm)
+{
+ preempt_disable();
+ if (mm->context.id != NO_CONTEXT) {
+ clear_bit(mm->context.id, context_map);
+ mm->context.id = NO_CONTEXT;
+ }
+ preempt_enable();
+}
+
+/*
+ * Initialize the context management stuff.
+ */
+void __init mmu_context_init(void)
+{
+ /* Reserve context 0 for kernel use */
+ context_map[0] = (1 << FIRST_CONTEXT) - 1;
+ next_mmu_context = FIRST_CONTEXT;
+}
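
The CTX_TO_VSID comment above documents how a context number and the top four bits of the 32-bit effective address are skewed into a 24-bit VSID, giving each context its own set of 16 VSIDs. A standalone sketch of that mapping, using the formula exactly as quoted in the comment (the helper name is hypothetical):

#include <stdio.h>

/* Sketch of the context-to-VSID skew quoted in the comment above:
 * CTX_TO_VSID(ctx, va) = ((ctx * (897 * 16) + ((va) >> 28) * 0x111) & 0xffffff)
 */
static unsigned long ctx_to_vsid(unsigned long ctx, unsigned long va)
{
	return (ctx * (897 * 16) + (va >> 28) * 0x111) & 0xffffff;
}

int main(void)
{
	unsigned long ctx = 1;	/* FIRST_CONTEXT */
	unsigned long seg;

	for (seg = 0; seg < 4; seg++)
		printf("segment %lu -> VSID 0x%06lx\n",
		       seg, ctx_to_vsid(ctx, seg << 28));
	return 0;
}
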
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 1db38ba1f544..dbeb86ac90cd 100644
--- a/arch/powerpc/mm/mmu_context_64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -24,6 +24,14 @@
static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDR(mmu_context_idr);
+/*
+ * The proto-VSID space has 2^35 - 1 segments available for user mappings.
+ * Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
+ * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
+ */
+#define NO_CONTEXT 0
+#define MAX_CONTEXT ((1UL << 19) - 1)
+
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
int index;
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
new file mode 100644
index 000000000000..52a0cfc38b64
--- /dev/null
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -0,0 +1,397 @@
+/*
+ * This file contains the routines for handling the MMU on those
+ * PowerPC implementations where the MMU is not using the hash
+ * table, such as 8xx, 4xx, BookE's etc...
+ *
+ * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
+ * IBM Corp.
+ *
+ * Derived from previous arch/powerpc/mm/mmu_context.c
+ * and arch/powerpc/include/asm/mmu_context.h
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * TODO:
+ *
+ * - The global context lock will not scale very well
+ * - The maps should be dynamically allocated to allow for processors
+ * that support more PID bits at runtime
+ * - Implement flush_tlb_mm() by making the context stale and picking
+ * a new one
+ * - More aggressively clear stale map bits and maybe find some way to
+ * also clear mm->cpu_vm_mask bits when processes are migrated
+ */
+
+#undef DEBUG
+#define DEBUG_STEAL_ONLY
+#undef DEBUG_MAP_CONSISTENCY
+/*#define DEBUG_CLAMP_LAST_CONTEXT 15 */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/bootmem.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+
+static unsigned int first_context, last_context;
+static unsigned int next_context, nr_free_contexts;
+static unsigned long *context_map;
+static unsigned long *stale_map[NR_CPUS];
+static struct mm_struct **context_mm;
+static spinlock_t context_lock = SPIN_LOCK_UNLOCKED;
+
+#define CTX_MAP_SIZE \
+ (sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
+
+
+/* Steal a context from a task that has one at the moment.
+ *
+ * This is used when we are running out of available PID numbers
+ * on the processors.
+ *
+ * This isn't an LRU system, it just frees up each context in
+ * turn (sort-of pseudo-random replacement :). This would be the
+ * place to implement an LRU scheme if anyone was motivated to do it.
+ * -- paulus
+ *
+ * For context stealing, we use a slightly different approach for
+ * SMP and UP. Basically, the UP one is simpler and doesn't use
+ * the stale map as we can just flush the local CPU
+ * -- benh
+ */
+#ifdef CONFIG_SMP
+static unsigned int steal_context_smp(unsigned int id)
+{
+ struct mm_struct *mm;
+ unsigned int cpu, max;
+
+ again:
+ max = last_context - first_context;
+
+ /* Attempt to free next_context first and then loop until we manage */
+ while (max--) {
+ /* Pick up the victim mm */
+ mm = context_mm[id];
+
+ /* We have a candidate victim, check if it's active, on SMP
+ * we cannot steal active contexts
+ */
+ if (mm->context.active) {
+ id++;
+ if (id > last_context)
+ id = first_context;
+ continue;
+ }
+ pr_debug("[%d] steal context %d from mm @%p\n",
+ smp_processor_id(), id, mm);
+
+		/* Mark this mm as having no context anymore */
+ mm->context.id = MMU_NO_CONTEXT;
+
+ /* Mark it stale on all CPUs that used this mm */
+ for_each_cpu_mask_nr(cpu, mm->cpu_vm_mask)
+ __set_bit(id, stale_map[cpu]);
+ return id;
+ }
+
+ /* This will happen if you have more CPUs than available contexts,
+ * all we can do here is wait a bit and try again
+ */
+ spin_unlock(&context_lock);
+ cpu_relax();
+ spin_lock(&context_lock);
+ goto again;
+}
+#endif /* CONFIG_SMP */
+
+/* Note that this will also be called on SMP if all other CPUs are
+ * offlined, which means that it may be called for cpu != 0. For
+ * this to work, we somewhat assume that CPUs that are onlined
+ * come up with a fully clean TLB (or are cleaned when offlined)
+ */
+static unsigned int steal_context_up(unsigned int id)
+{
+ struct mm_struct *mm;
+ int cpu = smp_processor_id();
+
+ /* Pick up the victim mm */
+ mm = context_mm[id];
+
+ pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm);
+
+	/* Mark this mm as having no context anymore */
+ mm->context.id = MMU_NO_CONTEXT;
+
+ /* Flush the TLB for that context */
+ local_flush_tlb_mm(mm);
+
+ /* XXX This clear should ultimately be part of local_flush_tlb_mm */
+ __clear_bit(id, stale_map[cpu]);
+
+ return id;
+}
+
+#ifdef DEBUG_MAP_CONSISTENCY
+static void context_check_map(void)
+{
+ unsigned int id, nrf, nact;
+
+ nrf = nact = 0;
+ for (id = first_context; id <= last_context; id++) {
+ int used = test_bit(id, context_map);
+ if (!used)
+ nrf++;
+ if (used != (context_mm[id] != NULL))
+ pr_err("MMU: Context %d is %s and MM is %p !\n",
+ id, used ? "used" : "free", context_mm[id]);
+ if (context_mm[id] != NULL)
+ nact += context_mm[id]->context.active;
+ }
+ if (nrf != nr_free_contexts) {
+ pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
+ nr_free_contexts, nrf);
+ nr_free_contexts = nrf;
+ }
+ if (nact > num_online_cpus())
+ pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
+ nact, num_online_cpus());
+ if (first_context > 0 && !test_bit(0, context_map))
+ pr_err("MMU: Context 0 has been freed !!!\n");
+}
+#else
+static void context_check_map(void) { }
+#endif
+
+void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
+{
+ unsigned int id, cpu = smp_processor_id();
+ unsigned long *map;
+
+ /* No lockless fast path .. yet */
+ spin_lock(&context_lock);
+
+#ifndef DEBUG_STEAL_ONLY
+ pr_debug("[%d] activating context for mm @%p, active=%d, id=%d\n",
+ cpu, next, next->context.active, next->context.id);
+#endif
+
+#ifdef CONFIG_SMP
+ /* Mark us active and the previous one not anymore */
+ next->context.active++;
+ if (prev) {
+#ifndef DEBUG_STEAL_ONLY
+ pr_debug(" old context %p active was: %d\n",
+ prev, prev->context.active);
+#endif
+ WARN_ON(prev->context.active < 1);
+ prev->context.active--;
+ }
+#endif /* CONFIG_SMP */
+
+ /* If we already have a valid assigned context, skip all that */
+ id = next->context.id;
+ if (likely(id != MMU_NO_CONTEXT))
+ goto ctxt_ok;
+
+ /* We really don't have a context, let's try to acquire one */
+ id = next_context;
+ if (id > last_context)
+ id = first_context;
+ map = context_map;
+
+ /* No more free contexts, let's try to steal one */
+ if (nr_free_contexts == 0) {
+#ifdef CONFIG_SMP
+ if (num_online_cpus() > 1) {
+ id = steal_context_smp(id);
+ goto stolen;
+ }
+#endif /* CONFIG_SMP */
+ id = steal_context_up(id);
+ goto stolen;
+ }
+ nr_free_contexts--;
+
+ /* We know there's at least one free context, try to find it */
+ while (__test_and_set_bit(id, map)) {
+ id = find_next_zero_bit(map, last_context+1, id);
+ if (id > last_context)
+ id = first_context;
+ }
+ stolen:
+ next_context = id + 1;
+ context_mm[id] = next;
+ next->context.id = id;
+
+#ifndef DEBUG_STEAL_ONLY
+ pr_debug("[%d] picked up new id %d, nrf is now %d\n",
+ cpu, id, nr_free_contexts);
+#endif
+
+ context_check_map();
+ ctxt_ok:
+
+ /* If that context got marked stale on this CPU, then flush the
+ * local TLB for it and unmark it before we use it
+ */
+ if (test_bit(id, stale_map[cpu])) {
+ pr_debug("[%d] flushing stale context %d for mm @%p !\n",
+ cpu, id, next);
+ local_flush_tlb_mm(next);
+
+ /* XXX This clear should ultimately be part of local_flush_tlb_mm */
+ __clear_bit(id, stale_map[cpu]);
+ }
+
+ /* Flick the MMU and release lock */
+ set_context(id, next->pgd);
+ spin_unlock(&context_lock);
+}
+
+/*
+ * Set up the context for a new address space.
+ */
+int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+ mm->context.id = MMU_NO_CONTEXT;
+ mm->context.active = 0;
+
+ return 0;
+}
+
+/*
+ * We're finished using the context for an address space.
+ */
+void destroy_context(struct mm_struct *mm)
+{
+ unsigned int id;
+
+ if (mm->context.id == MMU_NO_CONTEXT)
+ return;
+
+ WARN_ON(mm->context.active != 0);
+
+ spin_lock(&context_lock);
+ id = mm->context.id;
+ if (id != MMU_NO_CONTEXT) {
+ __clear_bit(id, context_map);
+ mm->context.id = MMU_NO_CONTEXT;
+#ifdef DEBUG_MAP_CONSISTENCY
+ mm->context.active = 0;
+ context_mm[id] = NULL;
+#endif
+ nr_free_contexts++;
+ }
+ spin_unlock(&context_lock);
+}
+
+#ifdef CONFIG_SMP
+
+static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned int)(long)hcpu;
+
+	/* We don't touch CPU 0 map, it's allocated at boot and kept
+ * around forever
+ */
+ if (cpu == 0)
+ return NOTIFY_OK;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ pr_debug("MMU: Allocating stale context map for CPU %d\n", cpu);
+ stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
+ break;
+#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ pr_debug("MMU: Freeing stale context map for CPU %d\n", cpu);
+ kfree(stale_map[cpu]);
+ stale_map[cpu] = NULL;
+ break;
+#endif
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
+ .notifier_call = mmu_context_cpu_notify,
+};
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Initialize the context management stuff.
+ */
+void __init mmu_context_init(void)
+{
+ /* Mark init_mm as being active on all possible CPUs since
+ * we'll get called with prev == init_mm the first time
+ * we schedule on a given CPU
+ */
+ init_mm.context.active = NR_CPUS;
+
+ /*
+ * The MPC8xx has only 16 contexts. We rotate through them on each
+ * task switch. A better way would be to keep track of tasks that
+ * own contexts, and implement an LRU usage. That way very active
+ * tasks don't always have to pay the TLB reload overhead. The
+ * kernel pages are mapped shared, so the kernel can run on behalf
+ * of any task that makes a kernel entry. Shared does not mean they
+ * are not protected, just that the ASID comparison is not performed.
+ * -- Dan
+ *
+ * The IBM4xx has 256 contexts, so we can just rotate through these
+ * as a way of "switching" contexts. If the TID of the TLB is zero,
+ * the PID/TID comparison is disabled, so we can use a TID of zero
+ * to represent all kernel pages as shared among all contexts.
+ * -- Dan
+ */
+ if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
+ first_context = 0;
+ last_context = 15;
+ } else {
+ first_context = 1;
+ last_context = 255;
+ }
+
+#ifdef DEBUG_CLAMP_LAST_CONTEXT
+ last_context = DEBUG_CLAMP_LAST_CONTEXT;
+#endif
+ /*
+ * Allocate the maps used by context management
+ */
+ context_map = alloc_bootmem(CTX_MAP_SIZE);
+ context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
+ stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);
+
+#ifdef CONFIG_SMP
+ register_cpu_notifier(&mmu_context_cpu_nb);
+#endif
+
+ printk(KERN_INFO
+ "MMU: Allocated %d bytes of context maps for %d contexts\n",
+ 2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
+ last_context - first_context + 1);
+
+ /*
+ * Some processors have too few contexts to reserve one for
+ * init_mm, and require using context 0 for a normal task.
+ * Other processors reserve the use of context zero for the kernel.
+ * This code assumes first_context < 32.
+ */
+ context_map[0] = (1 << first_context) - 1;
+ next_context = first_context;
+ nr_free_contexts = last_context - first_context + 1;
+}
+
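
For a sense of the sizes printed by mmu_context_init() above: with the 4xx/BookE parameters (contexts 1..255) on a 32-bit kernel, CTX_MAP_SIZE works out to 32 bytes per bitmap, and the reported total to roughly 1 KiB, dominated by the context_mm[] pointer array. A quick standalone check of that arithmetic (4-byte longs and pointers assumed):

#include <stdio.h>

/* Reproduces the CTX_MAP_SIZE and total-allocation arithmetic from
 * mmu_context_init(), with 32-bit word and pointer sizes assumed.
 */
int main(void)
{
	unsigned int first_context = 1, last_context = 255;	/* 4xx/BookE case */
	unsigned int sizeof_long = 4, bits_per_long = 32, sizeof_ptr = 4;
	unsigned int ctx_map_size =
		sizeof_long * (last_context / bits_per_long + 1);
	unsigned int total = 2 * ctx_map_size + sizeof_ptr * (last_context + 1);

	printf("CTX_MAP_SIZE = %u bytes, total = %u bytes for %u contexts\n",
	       ctx_map_size, total, last_context - first_context + 1);
	return 0;
}
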
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index fab3cfad4099..4314b39b6faf 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -22,10 +22,58 @@
#include <asm/tlbflush.h>
#include <asm/mmu.h>
+#ifdef CONFIG_PPC_MMU_NOHASH
+
+/*
+ * On 40x and 8xx, we directly inline tlbia and tlbivax
+ */
+#if defined(CONFIG_40x) || defined(CONFIG_8xx)
+static inline void _tlbil_all(void)
+{
+	asm volatile ("sync; tlbia; isync" : : : "memory");
+}
+static inline void _tlbil_pid(unsigned int pid)
+{
+	asm volatile ("sync; tlbia; isync" : : : "memory");
+}
+#else /* CONFIG_40x || CONFIG_8xx */
+extern void _tlbil_all(void);
+extern void _tlbil_pid(unsigned int pid);
+#endif /* !(CONFIG_40x || CONFIG_8xx) */
+
+/*
+ * On 8xx, we directly inline tlbie, on others, it's extern
+ */
+#ifdef CONFIG_8xx
+static inline void _tlbil_va(unsigned long address, unsigned int pid)
+{
+	asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
+}
+#else /* CONFIG_8xx */
+extern void _tlbil_va(unsigned long address, unsigned int pid);
+#endif /* CONFIG_8xx */
+
+/*
+ * As of today, we don't support tlbivax broadcast on any
+ * implementation. When that becomes the case, this will be
+ * an extern.
+ */
+static inline void _tlbivax_bcast(unsigned long address, unsigned int pid)
+{
+ BUG();
+}
+
+#else /* CONFIG_PPC_MMU_NOHASH */
+
extern void hash_preload(struct mm_struct *mm, unsigned long ea,
unsigned long access, unsigned long trap);
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
+
+#endif /* CONFIG_PPC_MMU_NOHASH */
+
#ifdef CONFIG_PPC32
extern void mapin_ram(void);
extern int map_page(unsigned long va, phys_addr_t pa, int flags);
@@ -58,17 +106,14 @@ extern phys_addr_t lowmem_end_addr;
* architectures. -- Dan
*/
#if defined(CONFIG_8xx)
-#define flush_HPTE(X, va, pg) _tlbie(va, 0 /* 8xx doesn't care about PID */)
#define MMU_init_hw() do { } while(0)
#define mmu_mapin_ram() (0UL)
#elif defined(CONFIG_4xx)
-#define flush_HPTE(pid, va, pg) _tlbie(va, pid)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
#elif defined(CONFIG_FSL_BOOKE)
-#define flush_HPTE(pid, va, pg) _tlbie(va, pid)
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
extern void adjust_total_lowmem(void);
@@ -77,18 +122,4 @@ extern void adjust_total_lowmem(void);
/* anything 32-bit except 4xx or 8xx */
extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(void);
-
-/* Be careful....this needs to be updated if we ever encounter 603 SMPs,
- * which includes all new 82xx processors. We need tlbie/tlbsync here
- * in that case (I think). -- Dan.
- */
-static inline void flush_HPTE(unsigned context, unsigned long va,
- unsigned long pdval)
-{
- if ((Hash != 0) &&
- cpu_has_feature(CPU_FTR_HPTE_TABLE))
- flush_hash_pages(0, va, pdval, 1);
- else
- _tlbie(va);
-}
#endif
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
new file mode 100644
index 000000000000..6d94116fdea1
--- /dev/null
+++ b/arch/powerpc/mm/pgtable.c
@@ -0,0 +1,117 @@
+/*
+ * This file contains common routines for dealing with freeing of page tables
+ *
+ * Derived from arch/powerpc/mm/tlb_64.c:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * Dave Engebretsen <engebret@us.ibm.com>
+ * Rework for PPC64 port.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
+static unsigned long pte_freelist_forced_free;
+
+struct pte_freelist_batch
+{
+ struct rcu_head rcu;
+ unsigned int index;
+ pgtable_free_t tables[0];
+};
+
+#define PTE_FREELIST_SIZE \
+ ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
+ / sizeof(pgtable_free_t))
+
+static void pte_free_smp_sync(void *arg)
+{
+ /* Do nothing, just ensure we sync with all CPUs */
+}
+
+/* This is only called when we are critically out of memory
+ * (and fail to get a page in pte_free_tlb).
+ */
+static void pgtable_free_now(pgtable_free_t pgf)
+{
+ pte_freelist_forced_free++;
+
+ smp_call_function(pte_free_smp_sync, NULL, 1);
+
+ pgtable_free(pgf);
+}
+
+static void pte_free_rcu_callback(struct rcu_head *head)
+{
+ struct pte_freelist_batch *batch =
+ container_of(head, struct pte_freelist_batch, rcu);
+ unsigned int i;
+
+ for (i = 0; i < batch->index; i++)
+ pgtable_free(batch->tables[i]);
+
+ free_page((unsigned long)batch);
+}
+
+static void pte_free_submit(struct pte_freelist_batch *batch)
+{
+ INIT_RCU_HEAD(&batch->rcu);
+ call_rcu(&batch->rcu, pte_free_rcu_callback);
+}
+
+void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
+{
+ /* This is safe since tlb_gather_mmu has disabled preemption */
+ cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
+ struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+
+ if (atomic_read(&tlb->mm->mm_users) < 2 ||
+ cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
+ pgtable_free(pgf);
+ return;
+ }
+
+ if (*batchp == NULL) {
+ *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
+ if (*batchp == NULL) {
+ pgtable_free_now(pgf);
+ return;
+ }
+ (*batchp)->index = 0;
+ }
+ (*batchp)->tables[(*batchp)->index++] = pgf;
+ if ((*batchp)->index == PTE_FREELIST_SIZE) {
+ pte_free_submit(*batchp);
+ *batchp = NULL;
+ }
+}
+
+void pte_free_finish(void)
+{
+ /* This is safe since tlb_gather_mmu has disabled preemption */
+ struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+
+ if (*batchp == NULL)
+ return;
+ pte_free_submit(*batchp);
+ *batchp = NULL;
+}
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index c31d6d26f0b5..38ff35f2142a 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -48,10 +48,6 @@ EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */
extern char etext[], _stext[];
-#ifdef CONFIG_SMP
-extern void hash_page_sync(void);
-#endif
-
#ifdef HAVE_BATS
extern phys_addr_t v_mapped_by_bats(unsigned long va);
extern unsigned long p_mapped_by_bats(phys_addr_t pa);
@@ -72,24 +68,29 @@ extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
#define p_mapped_by_tlbcam(x) (0UL)
#endif /* HAVE_TLBCAM */
-#ifdef CONFIG_PTE_64BIT
-/* Some processors use an 8kB pgdir because they have 8-byte Linux PTEs. */
-#define PGDIR_ORDER 1
-#else
-#define PGDIR_ORDER 0
-#endif
+#define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT)
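+/* i.e. log2 of the pgdir size in bytes: 2^(32 - PGDIR_SHIFT) entries of
+ * 2^PGD_T_LOG2 bytes each are needed to cover the 32-bit address space.
+ */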
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret;
- ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGDIR_ORDER);
+	/* pgdir takes a page or two with 4K pages and a fraction of a page otherwise */
+#ifndef CONFIG_PPC_4K_PAGES
+ ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
+#else
+ ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+ PGDIR_ORDER - PAGE_SHIFT);
+#endif
return ret;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- free_pages((unsigned long)pgd, PGDIR_ORDER);
+#ifndef CONFIG_PPC_4K_PAGES
+ kfree((void *)pgd);
+#else
+ free_pages((unsigned long)pgd, PGDIR_ORDER - PAGE_SHIFT);
+#endif
}
__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
@@ -125,23 +126,6 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
return ptepage;
}
-void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-#ifdef CONFIG_SMP
- hash_page_sync();
-#endif
- free_page((unsigned long)pte);
-}
-
-void pte_free(struct mm_struct *mm, pgtable_t ptepage)
-{
-#ifdef CONFIG_SMP
- hash_page_sync();
-#endif
- pgtable_page_dtor(ptepage);
- __free_page(ptepage);
-}
-
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
@@ -194,6 +178,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
if (p < 16*1024*1024)
p += _ISA_MEM_BASE;
+#ifndef CONFIG_CRASH_DUMP
/*
* Don't allow anybody to remap normal RAM that we're using.
* mem_init() sets high_memory so only do the check after that.
@@ -203,6 +188,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
(unsigned long long)p, __builtin_return_address(0));
return NULL;
}
+#endif
if (size == 0)
return NULL;
@@ -288,7 +274,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
}
/*
- * Map in a big chunk of physical memory starting at KERNELBASE.
+ * Map in a big chunk of physical memory starting at PAGE_OFFSET.
*/
void __init mapin_ram(void)
{
@@ -297,7 +283,7 @@ void __init mapin_ram(void)
int ktext;
s = mmu_mapin_ram();
- v = KERNELBASE + s;
+ v = PAGE_OFFSET + s;
p = memstart_addr + s;
for (; s < total_lowmem; s += PAGE_SIZE) {
ktext = ((char *) v >= _stext && (char *) v < etext);
@@ -363,7 +349,11 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
return -EINVAL;
set_pte_at(&init_mm, address, kpte, mk_pte(page, prot));
wmb();
- flush_HPTE(0, address, pmd_val(*kpmd));
+#ifdef CONFIG_PPC_STD_MMU
+ flush_hash_pages(0, address, pmd_val(*kpmd), 1);
+#else
+ flush_tlb_page(NULL, address);
+#endif
pte_unmap(kpte);
return 0;
@@ -400,7 +390,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
#endif /* CONFIG_DEBUG_PAGEALLOC */
static int fixmaps;
-unsigned long FIXADDR_TOP = 0xfffff000;
+unsigned long FIXADDR_TOP = (-PAGE_SIZE);
EXPORT_SYMBOL(FIXADDR_TOP);
void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 6aa120813775..45d925360b89 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -95,16 +95,16 @@ unsigned long __init mmu_mapin_ram(void)
break;
}
- setbat(2, KERNELBASE, 0, bl, _PAGE_RAM);
- done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
+ setbat(2, PAGE_OFFSET, 0, bl, _PAGE_RAM);
+ done = (unsigned long)bat_addrs[2].limit - PAGE_OFFSET + 1;
if ((done < tot) && !bat_addrs[3].limit) {
/* use BAT3 to cover a bit more */
tot -= done;
for (bl = 128<<10; bl < max_size; bl <<= 1)
if (bl * 2 > tot)
break;
- setbat(3, KERNELBASE+done, done, bl, _PAGE_RAM);
- done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
+ setbat(3, PAGE_OFFSET+done, done, bl, _PAGE_RAM);
+ done = (unsigned long)bat_addrs[3].limit - PAGE_OFFSET + 1;
}
return done;
@@ -192,7 +192,7 @@ void __init MMU_init_hw(void)
extern unsigned int hash_page[];
extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];
- if (!cpu_has_feature(CPU_FTR_HPTE_TABLE)) {
+ if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
/*
* Put a blr (procedure return) instruction at the
* start of hash_page, since we can still get DSI
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_hash32.c
index f9a47fee3927..65190587a365 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_hash32.c
@@ -137,6 +137,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
flush_range(&init_mm, start, end);
FINISH_FLUSH;
}
+EXPORT_SYMBOL(flush_tlb_kernel_range);
/*
* Flush all the (user) entries for the address space described by mm.
@@ -160,6 +161,7 @@ void flush_tlb_mm(struct mm_struct *mm)
flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
FINISH_FLUSH;
}
+EXPORT_SYMBOL(flush_tlb_mm);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
@@ -176,6 +178,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
FINISH_FLUSH;
}
+EXPORT_SYMBOL(flush_tlb_page);
/*
* For each address in the range, find the pte for the address
@@ -188,3 +191,4 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
flush_range(vma->vm_mm, start, end);
FINISH_FLUSH;
}
+EXPORT_SYMBOL(flush_tlb_range);
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_hash64.c
index be7dd422c0fa..c931bc7d1079 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -37,81 +37,6 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
* arch/powerpc/include/asm/tlb.h file -- tgall
*/
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
-static unsigned long pte_freelist_forced_free;
-
-struct pte_freelist_batch
-{
- struct rcu_head rcu;
- unsigned int index;
- pgtable_free_t tables[0];
-};
-
-#define PTE_FREELIST_SIZE \
- ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
- / sizeof(pgtable_free_t))
-
-static void pte_free_smp_sync(void *arg)
-{
- /* Do nothing, just ensure we sync with all CPUs */
-}
-
-/* This is only called when we are critically out of memory
- * (and fail to get a page in pte_free_tlb).
- */
-static void pgtable_free_now(pgtable_free_t pgf)
-{
- pte_freelist_forced_free++;
-
- smp_call_function(pte_free_smp_sync, NULL, 1);
-
- pgtable_free(pgf);
-}
-
-static void pte_free_rcu_callback(struct rcu_head *head)
-{
- struct pte_freelist_batch *batch =
- container_of(head, struct pte_freelist_batch, rcu);
- unsigned int i;
-
- for (i = 0; i < batch->index; i++)
- pgtable_free(batch->tables[i]);
-
- free_page((unsigned long)batch);
-}
-
-static void pte_free_submit(struct pte_freelist_batch *batch)
-{
- INIT_RCU_HEAD(&batch->rcu);
- call_rcu(&batch->rcu, pte_free_rcu_callback);
-}
-
-void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
-{
- /* This is safe since tlb_gather_mmu has disabled preemption */
- cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
- struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
- if (atomic_read(&tlb->mm->mm_users) < 2 ||
- cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
- pgtable_free(pgf);
- return;
- }
-
- if (*batchp == NULL) {
- *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
- if (*batchp == NULL) {
- pgtable_free_now(pgf);
- return;
- }
- (*batchp)->index = 0;
- }
- (*batchp)->tables[(*batchp)->index++] = pgf;
- if ((*batchp)->index == PTE_FREELIST_SIZE) {
- pte_free_submit(*batchp);
- *batchp = NULL;
- }
-}
/*
* A linux PTE was changed and the corresponding hash table entry
@@ -229,17 +154,6 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
batch->index = 0;
}
-void pte_free_finish(void)
-{
- /* This is safe since tlb_gather_mmu has disabled preemption */
- struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
-
- if (*batchp == NULL)
- return;
- pte_free_submit(*batchp);
- *batchp = NULL;
-}
-
/**
* __flush_hash_table_range - Flush all HPTEs for a given address range
* from the hash table (and the TLB). But keeps
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
new file mode 100644
index 000000000000..803a64c02b06
--- /dev/null
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -0,0 +1,209 @@
+/*
+ * This file contains the routines for TLB flushing.
+ * On machines where the MMU does not use a hash table to store virtual to
+ * physical translations (i.e., SW loaded TLBs or Book3E compliant processors;
+ * this does -not- include the 603, however, which shares its implementation
+ * with hash based processors)
+ *
+ * -- BenH
+ *
+ * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
+ * IBM Corp.
+ *
+ * Derived from arch/ppc/mm/init.c:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/preempt.h>
+#include <linux/spinlock.h>
+
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+#include "mmu_decl.h"
+
+/*
+ * Base TLB flushing operations:
+ *
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes kernel pages
+ *
+ * - local_* variants of page and mm only apply to the current
+ * processor
+ */
+
+/*
+ * These are the base non-SMP variants of page and mm flushing
+ */
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ unsigned int pid;
+
+ preempt_disable();
+ pid = mm->context.id;
+ if (pid != MMU_NO_CONTEXT)
+ _tlbil_pid(pid);
+ preempt_enable();
+}
+EXPORT_SYMBOL(local_flush_tlb_mm);
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+ unsigned int pid;
+
+ preempt_disable();
+ pid = vma ? vma->vm_mm->context.id : 0;
+ if (pid != MMU_NO_CONTEXT)
+ _tlbil_va(vmaddr, pid);
+ preempt_enable();
+}
+EXPORT_SYMBOL(local_flush_tlb_page);
+
+
+/*
+ * And here are the SMP non-local implementations
+ */
+#ifdef CONFIG_SMP
+
+static DEFINE_SPINLOCK(tlbivax_lock);
+
+struct tlb_flush_param {
+ unsigned long addr;
+ unsigned int pid;
+};
+
+static void do_flush_tlb_mm_ipi(void *param)
+{
+ struct tlb_flush_param *p = param;
+
+ _tlbil_pid(p ? p->pid : 0);
+}
+
+static void do_flush_tlb_page_ipi(void *param)
+{
+ struct tlb_flush_param *p = param;
+
+ _tlbil_va(p->addr, p->pid);
+}
+
+
+/* Note on invalidations and PID:
+ *
+ * We snapshot the PID with preempt disabled. At this point, it can still
+ * change either because:
+ * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
+ * - we are invalidating some target that isn't currently running here
+ * and is concurrently acquiring a new PID on another CPU
+ * - some other CPU is re-acquiring a lost PID for this mm
+ * etc...
+ *
+ * However, this shouldn't be a problem as we only guarantee
+ * invalidation of TLB entries present prior to this call, so we
+ * don't care about the PID changing, and invalidating a stale PID
+ * is generally harmless.
+ */
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ cpumask_t cpu_mask;
+ unsigned int pid;
+
+ preempt_disable();
+ pid = mm->context.id;
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ goto no_context;
+ cpu_mask = mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+ if (!cpus_empty(cpu_mask)) {
+ struct tlb_flush_param p = { .pid = pid };
+ smp_call_function_mask(cpu_mask, do_flush_tlb_mm_ipi, &p, 1);
+ }
+ _tlbil_pid(pid);
+ no_context:
+ preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_mm);
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+ cpumask_t cpu_mask;
+ unsigned int pid;
+
+ preempt_disable();
+ pid = vma ? vma->vm_mm->context.id : 0;
+ if (unlikely(pid == MMU_NO_CONTEXT))
+ goto bail;
+ cpu_mask = vma->vm_mm->cpu_vm_mask;
+ cpu_clear(smp_processor_id(), cpu_mask);
+ if (!cpus_empty(cpu_mask)) {
+ /* If broadcast tlbivax is supported, use it */
+ if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
+ int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
+ if (lock)
+ spin_lock(&tlbivax_lock);
+ _tlbivax_bcast(vmaddr, pid);
+ if (lock)
+ spin_unlock(&tlbivax_lock);
+ goto bail;
+ } else {
+ struct tlb_flush_param p = { .pid = pid, .addr = vmaddr };
+ smp_call_function_mask(cpu_mask,
+ do_flush_tlb_page_ipi, &p, 1);
+ }
+ }
+ _tlbil_va(vmaddr, pid);
+ bail:
+ preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_page);
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Flush kernel TLB entries in the given range
+ */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
+ _tlbil_pid(0);
+ preempt_enable();
+#else
+	_tlbil_pid(0);
+#endif
+}
+EXPORT_SYMBOL(flush_tlb_kernel_range);
+
+/*
+ * Currently, for range flushing, we just do a full mm flush. This should
+ * be optimized based on a threshold on the size of the range, since
+ * some implementations can stack multiple tlbivax instructions before a
+ * tlbsync, but for now we keep it that way.
+ */
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_mm(vma->vm_mm);
+}
+EXPORT_SYMBOL(flush_tlb_range);
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
new file mode 100644
index 000000000000..f900a39e6ec4
--- /dev/null
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -0,0 +1,166 @@
+/*
+ * This file contains low-level functions for performing various
+ * types of TLB invalidations on various processors with no hash
+ * table.
+ *
+ * This file implements the following functions for all no-hash
+ * processors. Some aren't implemented for some variants. Some
+ * are inline in tlbflush.h
+ *
+ * - tlbil_va
+ * - tlbil_pid
+ * - tlbil_all
+ * - tlbivax_bcast (not yet)
+ *
+ * Code mostly moved over from misc_32.S
+ *
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Partially rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * Paul Mackerras, Kumar Gala and Benjamin Herrenschmidt.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/cputable.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/processor.h>
+
+#if defined(CONFIG_40x)
+
+/*
+ * 40x implementation needs only tlbil_va
+ */
+_GLOBAL(_tlbil_va)
+ /* We run the search with interrupts disabled because we have to change
+ * the PID and I don't want to preempt when that happens.
+ */
+ mfmsr r5
+ mfspr r6,SPRN_PID
+ wrteei 0
+ mtspr SPRN_PID,r4
+ tlbsx. r3, 0, r3
+ mtspr SPRN_PID,r6
+ wrtee r5
+ bne 1f
+ sync
+ /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is
+ * clear. Since 25 is the V bit in the TLB_TAG, loading this value
+ * will invalidate the TLB entry. */
+ tlbwe r3, r3, TLB_TAG
+ isync
+1: blr
+
+#elif defined(CONFIG_8xx)
+
+/*
+ * Nothing to do for 8xx, everything is inline
+ */
+
+#elif defined(CONFIG_44x)
+
+/*
+ * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
+ * of the TLB for everything else.
+ */
+_GLOBAL(_tlbil_va)
+ mfspr r5,SPRN_MMUCR
+ rlwimi r5,r4,0,24,31 /* Set TID */
+
+ /* We have to run the search with interrupts disabled, otherwise
+ * an interrupt which causes a TLB miss can clobber the MMUCR
+ * between the mtspr and the tlbsx.
+ *
+ * Critical and Machine Check interrupts take care of saving
+ * and restoring MMUCR, so only normal interrupts have to be
+ * taken care of.
+ */
+ mfmsr r4
+ wrteei 0
+ mtspr SPRN_MMUCR,r5
+ tlbsx. r3, 0, r3
+ wrtee r4
+ bne 1f
+ sync
+ /* There are only 64 TLB entries, so r3 < 64,
+	 * which means bit 22 is clear.  Since 22 is
+ * the V bit in the TLB_PAGEID, loading this
+ * value will invalidate the TLB entry.
+ */
+ tlbwe r3, r3, PPC44x_TLB_PAGEID
+ isync
+1: blr
+
+_GLOBAL(_tlbil_all)
+_GLOBAL(_tlbil_pid)
+ li r3,0
+ sync
+
+ /* Load high watermark */
+ lis r4,tlb_44x_hwater@ha
+ lwz r5,tlb_44x_hwater@l(r4)
+
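+	/* Write an invalid (V=0) PAGEID into every TLB entry up to and
+	 * including the high watermark, wiping the whole TLB.
+	 */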
+1: tlbwe r3,r3,PPC44x_TLB_PAGEID
+ addi r3,r3,1
+ cmpw 0,r3,r5
+ ble 1b
+
+ isync
+ blr
+
+#elif defined(CONFIG_FSL_BOOKE)
+/*
+ * FSL BookE implementations. Currently _pid and _all are the
+ * same. This will change when tlbilx is actually supported and
+ * performs invalidate-by-PID. This change will be driven by
+ * an mmu_features conditional.
+ */
+
+/*
+ * Flush MMU TLB on the local processor
+ */
+_GLOBAL(_tlbil_pid)
+_GLOBAL(_tlbil_all)
+#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+ MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
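+	/* Setting the TLBnFI bits in MMUCSR0 flash-invalidates the TLB
+	 * arrays; hardware clears the bits again once the invalidation
+	 * completes, so just poll for that below.
+	 */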
+ li r3,(MMUCSR0_TLBFI)@l
+ mtspr SPRN_MMUCSR0, r3
+1:
+ mfspr r3,SPRN_MMUCSR0
+ andi. r3,r3,MMUCSR0_TLBFI@l
+ bne 1b
+ msync
+ isync
+ blr
+
+/*
+ * Flush MMU TLB for a particular address, but only on the local processor
+ * (no broadcast)
+ */
+_GLOBAL(_tlbil_va)
+ mfmsr r10
+ wrteei 0
+ slwi r4,r4,16
+ mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
+ tlbsx 0,r3
+ mfspr r4,SPRN_MAS1 /* check valid */
+ andis. r3,r4,MAS1_VALID@h
+ beq 1f
+ rlwinm r4,r4,0,1,31
+ mtspr SPRN_MAS1,r4
+ tlbwe
+ msync
+ isync
+1: wrtee r10
+ blr
+#else
+#error Unsupported processor type !
+#endif
diff --git a/arch/powerpc/platforms/40x/ep405.c b/arch/powerpc/platforms/40x/ep405.c
index ae2e7f67c18e..4058fd1e7fc7 100644
--- a/arch/powerpc/platforms/40x/ep405.c
+++ b/arch/powerpc/platforms/40x/ep405.c
@@ -100,7 +100,7 @@ static void __init ep405_setup_arch(void)
/* Find & init the BCSR CPLD */
ep405_init_bcsr();
- ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
+ ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
}
static int __init ep405_probe(void)
diff --git a/arch/powerpc/platforms/40x/kilauea.c b/arch/powerpc/platforms/40x/kilauea.c
index 1dd24ffc0dc1..fd7d934dac8b 100644
--- a/arch/powerpc/platforms/40x/kilauea.c
+++ b/arch/powerpc/platforms/40x/kilauea.c
@@ -44,7 +44,7 @@ static int __init kilauea_probe(void)
if (!of_flat_dt_is_compatible(root, "amcc,kilauea"))
return 0;
- ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
+ ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
return 1;
}
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
index 4498a86b46c3..f40ac9b8f99f 100644
--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -61,7 +61,7 @@ static int __init ppc40x_probe(void)
for (i = 0; i < ARRAY_SIZE(board); i++) {
if (of_flat_dt_is_compatible(root, board[i])) {
- ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
+ ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
return 1;
}
}
diff --git a/arch/powerpc/platforms/44x/ebony.c b/arch/powerpc/platforms/44x/ebony.c
index a0e8fe4662f6..88b9117fa691 100644
--- a/arch/powerpc/platforms/44x/ebony.c
+++ b/arch/powerpc/platforms/44x/ebony.c
@@ -54,7 +54,7 @@ static int __init ebony_probe(void)
if (!of_flat_dt_is_compatible(root, "ibm,ebony"))
return 0;
- ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
+ ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
return 1;
}
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c
index 29671262801f..76fdc51dac8b 100644
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -69,7 +69,7 @@ static int __init ppc44x_probe(void)
for (i = 0; i < ARRAY_SIZE(board); i++) {
if (of_flat_dt_is_compatible(root, board[i])) {
- ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
+ ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
return 1;
}
}
diff --git a/arch/powerpc/platforms/44x/sam440ep.c b/arch/powerpc/platforms/44x/sam440ep.c
index 47f10e647735..a78e8eb6da41 100644
--- a/arch/powerpc/platforms/44x/sam440ep.c
+++ b/arch/powerpc/platforms/44x/sam440ep.c
@@ -51,7 +51,7 @@ static int __init sam440ep_probe(void)
if (!of_flat_dt_is_compatible(root, "acube,sam440ep"))
return 0;
- ppc_pci_flags = PPC_PCI_REASSIGN_ALL_RSRC;
+ ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC);
return 1;
}
diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
index fe92e65103ed..b5c753db125e 100644
--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
@@ -3,7 +3,6 @@
#include <asm/io.h>
#include <asm/time.h>
#include <asm/mpc52xx.h>
-#include "mpc52xx_pic.h"
/* defined in lite5200_sleep.S and only used here */
extern void lite5200_low_power(void __iomem *sram, void __iomem *mbar);
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
index b49a18527661..c3f2c21024e3 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
@@ -375,7 +375,7 @@ mpc52xx_add_bridge(struct device_node *node)
pr_debug("Adding MPC52xx PCI host bridge %s\n", node->full_name);
- ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
if (of_address_to_resource(node, 0, &rsrc) != 0) {
printk(KERN_ERR "Can't get %s resources\n", node->full_name);
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 8479394e9ab4..72865e8e4b51 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -2,20 +2,100 @@
*
* Programmable Interrupt Controller functions for the Freescale MPC52xx.
*
+ * Copyright (C) 2008 Secret Lab Technologies Ltd.
* Copyright (C) 2006 bplan GmbH
+ * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
+ * Copyright (C) 2003 Montavista Software, Inc
*
* Based on the code from the 2.4 kernel by
* Dale Farnsworth <dfarnsworth@mvista.com> and Kent Borg.
*
- * Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003 Montavista Software, Inc
- *
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*
*/
+/*
+ * This is the device driver for the MPC5200 interrupt controller.
+ *
+ * hardware overview
+ * -----------------
+ * The MPC5200 interrupt controller groups all interrupt sources into
+ * three groups called 'critical', 'main', and 'peripheral'.  The critical
+ * group has 3 irqs: External IRQ0, slice timer 0 irq, and wake from deep
+ * sleep.  The main group includes the other 3 external IRQs, slice timer 1,
+ * RTC, gpios, and the general purpose timers.  The peripheral group contains
+ * the remaining irq sources from all of the on-chip peripherals (PSCs,
+ * Ethernet, USB, DMA, etc).
+ *
+ * virqs
+ * -----
+ * The Linux IRQ subsystem requires that each irq source be assigned a
+ * system wide unique IRQ number starting at 1 (0 means no irq). Since
+ * systems can have multiple interrupt controllers, the virtual IRQ (virq)
+ * infrastructure lets each interrupt controller define a local set
+ * of IRQ numbers and the virq infrastructure maps those numbers into
+ * a unique range of the global IRQ# space.
+ *
+ * To define a range of virq numbers for this controller, this driver first
+ * assigns a number to each of the irq groups (called the level 1 or L1
+ * value).  Within each group, individual irq sources are also assigned a
+ * number, as defined by the MPC5200 user guide; this driver refers to it as
+ * the level 2 or L2 value.  The virq number is determined by shifting up the
+ * L1 value by MPC52xx_IRQ_L1_OFFSET and ORing it with the L2 value.
+ *
+ * For example, the TMR0 interrupt is irq 9 in the main group. The
+ * virq for TMR0 is calculated by ((1 << MPC52xx_IRQ_L1_OFFSET) | 9).
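+ * With MPC52xx_IRQ_L1_OFFSET defined as 6 below, that works out to
+ * (0x40 | 9) = 0x49.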
+ *
+ * The observant reader will also notice that this driver defines a 4th
+ * interrupt group called 'bestcomm'. The bestcomm group isn't physically
+ * part of the MPC5200 interrupt controller, but it is used here to assign
+ * a separate virq number for each bestcomm task (since any of the 16
+ * bestcomm tasks can cause the bestcomm interrupt to be raised). When a
+ * bestcomm interrupt occurs (peripheral group, irq 0) this driver determines
+ * which task needs servicing and returns the irq number for that task. This
+ * allows drivers which use bestcomm to define their own interrupt handlers.
+ *
+ * irq_chip structures
+ * -------------------
+ * For actually manipulating IRQs (masking, enabling, clearing, etc) this
+ * driver defines four separate 'irq_chip' structures, one for the main
+ * group, one for the peripherals group, one for the bestcomm group and one
+ * for external interrupts. The irq_chip structures provide the hooks needed
+ * to manipulate each IRQ source, and since each group has a separate set
+ * of registers for controlling the irq, it makes sense to divide up the
+ * hooks along those lines.
+ *
+ * You'll notice that there is not an irq_chip for the critical group and
+ * you'll also notice that there is an irq_chip defined for external
+ * interrupts even though there is no external interrupt group. The reason
+ * for this is that the four external interrupts are all managed with the same
+ * register even though one of the external IRQs is in the critical group and
+ * the other three are in the main group. For this reason it makes sense for
+ * the 4 external irqs to be managed using a separate set of hooks. The
+ * reason there is no crit irq_chip is that of the 3 irqs in the critical
+ * group, only the external interrupt is actually supported at this time by
+ * this driver, and since the external interrupt is the only one used, it can
+ * just be directed to make use of the external irq irq_chip.
+ *
+ * device tree bindings
+ * --------------------
+ * The device tree bindings for this controller reflect the two level
+ * organization of irqs in the device. #interrupt-cells = <3> where the
+ * first cell is the group number [0..3], the second cell is the irq
+ * number in the group, and the third cell is the sense type (level/edge).
+ * For reference, the following is a list of the interrupt property values
+ * associated with external interrupt sources on the MPC5200 (just because
+ * it is non-obvious to determine what the interrupts property should be
+ * when reading the mpc5200 manual and it is a frequently asked question).
+ *
+ * External interrupts:
+ * <0 0 n> external irq0, n is sense (n=0: level high,
+ * <1 1 n> external irq1, n is sense n=1: edge rising,
+ * <1 2 n> external irq2, n is sense n=2: edge falling,
+ * <1 3 n> external irq3, n is sense n=3: level low)
+ */
#undef DEBUG
#include <linux/interrupt.h>
@@ -24,11 +104,19 @@
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/mpc52xx.h>
-#include "mpc52xx_pic.h"
-/*
- *
-*/
+/* HW IRQ mapping */
+#define MPC52xx_IRQ_L1_CRIT (0)
+#define MPC52xx_IRQ_L1_MAIN (1)
+#define MPC52xx_IRQ_L1_PERP (2)
+#define MPC52xx_IRQ_L1_SDMA (3)
+
+#define MPC52xx_IRQ_L1_OFFSET (6)
+#define MPC52xx_IRQ_L1_MASK (0x00c0)
+#define MPC52xx_IRQ_L2_MASK (0x003f)
+
+#define MPC52xx_IRQ_HIGHTESTHWIRQ (0xd0)
+
/* MPC5200 device tree match tables */
static struct of_device_id mpc52xx_pic_ids[] __initdata = {
@@ -53,10 +141,7 @@ static unsigned char mpc52xx_map_senses[4] = {
IRQ_TYPE_LEVEL_LOW,
};
-/*
- *
-*/
-
+/* Utility functions */
static inline void io_be_setbit(u32 __iomem *addr, int bitno)
{
out_be32(addr, in_be32(addr) | (1 << bitno));
@@ -69,15 +154,14 @@ static inline void io_be_clrbit(u32 __iomem *addr, int bitno)
/*
* IRQ[0-3] interrupt irq_chip
-*/
-
+ */
static void mpc52xx_extirq_mask(unsigned int virq)
{
int irq;
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -90,7 +174,7 @@ static void mpc52xx_extirq_unmask(unsigned int virq)
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -103,7 +187,7 @@ static void mpc52xx_extirq_ack(unsigned int virq)
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -117,7 +201,7 @@ static int mpc52xx_extirq_set_type(unsigned int virq, unsigned int flow_type)
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__, irq, l2irq, flow_type);
@@ -156,15 +240,14 @@ static struct irq_chip mpc52xx_extirq_irqchip = {
/*
* Main interrupt irq_chip
-*/
-
+ */
static void mpc52xx_main_mask(unsigned int virq)
{
int irq;
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -177,7 +260,7 @@ static void mpc52xx_main_unmask(unsigned int virq)
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -193,15 +276,14 @@ static struct irq_chip mpc52xx_main_irqchip = {
/*
* Peripherals interrupt irq_chip
-*/
-
+ */
static void mpc52xx_periph_mask(unsigned int virq)
{
int irq;
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -214,7 +296,7 @@ static void mpc52xx_periph_unmask(unsigned int virq)
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -230,15 +312,14 @@ static struct irq_chip mpc52xx_periph_irqchip = {
/*
* SDMA interrupt irq_chip
-*/
-
+ */
static void mpc52xx_sdma_mask(unsigned int virq)
{
int irq;
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -251,7 +332,7 @@ static void mpc52xx_sdma_unmask(unsigned int virq)
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -264,7 +345,7 @@ static void mpc52xx_sdma_ack(unsigned int virq)
int l2irq;
irq = irq_map[virq].hwirq;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
pr_debug("%s: irq=%x. l2=%d\n", __func__, irq, l2irq);
@@ -278,13 +359,12 @@ static struct irq_chip mpc52xx_sdma_irqchip = {
.ack = mpc52xx_sdma_ack,
};
-/*
- * irq_host
-*/
-
+/**
+ * mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property
+ */
static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
- u32 * intspec, unsigned int intsize,
- irq_hw_number_t * out_hwirq,
+ u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
{
int intrvect_l1;
@@ -299,10 +379,9 @@ static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
intrvect_l2 = (int)intspec[1];
intrvect_type = (int)intspec[2];
- intrvect_linux =
- (intrvect_l1 << MPC52xx_IRQ_L1_OFFSET) & MPC52xx_IRQ_L1_MASK;
- intrvect_linux |=
- (intrvect_l2 << MPC52xx_IRQ_L2_OFFSET) & MPC52xx_IRQ_L2_MASK;
+ intrvect_linux = (intrvect_l1 << MPC52xx_IRQ_L1_OFFSET) &
+ MPC52xx_IRQ_L1_MASK;
+ intrvect_linux |= intrvect_l2 & MPC52xx_IRQ_L2_MASK;
pr_debug("return %x, l1=%d, l2=%d\n", intrvect_linux, intrvect_l1,
intrvect_l2);
@@ -313,11 +392,11 @@ static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
return 0;
}
-/*
- * this function retrieves the correct IRQ type out
- * of the MPC regs
- * Only externals IRQs needs this
-*/
+/**
+ * mpc52xx_irqx_gettype - determine the IRQ sense type (level/edge)
+ *
+ * Only external IRQs need this.
+ */
static int mpc52xx_irqx_gettype(int irq)
{
int type;
@@ -329,6 +408,9 @@ static int mpc52xx_irqx_gettype(int irq)
return mpc52xx_map_senses[type];
}
+/**
+ * mpc52xx_irqhost_map - Hook to map from virq to an irq_chip structure
+ */
static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
irq_hw_number_t irq)
{
@@ -339,7 +421,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
int type;
l1irq = (irq & MPC52xx_IRQ_L1_MASK) >> MPC52xx_IRQ_L1_OFFSET;
- l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+ l2irq = irq & MPC52xx_IRQ_L2_MASK;
/*
* Most of ours IRQs will be level low
@@ -379,8 +461,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
break;
default:
- pr_debug("%s: Error, unknown L1 IRQ (0x%x)\n", __func__, l1irq);
- printk(KERN_ERR "Unknow IRQ!\n");
+ pr_err("%s: invalid virq requested (0x%x)\n", __func__, virq);
return -EINVAL;
}
@@ -406,10 +487,15 @@ static struct irq_host_ops mpc52xx_irqhost_ops = {
.map = mpc52xx_irqhost_map,
};
-/*
- * init (public)
-*/
-
+/**
+ * mpc52xx_init_irq - Initialize and register with the virq subsystem
+ *
+ * Hook for setting up IRQs on an mpc5200 system. A pointer to this function
+ * is to be put into the machine definition structure.
+ *
+ * This function searches the device tree for an MPC5200 interrupt controller,
+ * initializes it, and registers it with the virq subsystem.
+ */
void __init mpc52xx_init_irq(void)
{
u32 intr_ctrl;
@@ -454,7 +540,6 @@ void __init mpc52xx_init_irq(void)
* As last step, add an irq host to translate the real
* hw irq information provided by the ofw to linux virq
*/
-
mpc52xx_irqhost = irq_alloc_host(picnode, IRQ_HOST_MAP_LINEAR,
MPC52xx_IRQ_HIGHTESTHWIRQ,
&mpc52xx_irqhost_ops, -1);
@@ -462,12 +547,38 @@ void __init mpc52xx_init_irq(void)
if (!mpc52xx_irqhost)
panic(__FILE__ ": Cannot allocate the IRQ host\n");
- printk(KERN_INFO "MPC52xx PIC is up and running!\n");
+ irq_set_default_host(mpc52xx_irqhost);
+
+ pr_info("MPC52xx PIC is up and running!\n");
}
-/*
- * get_irq (public)
-*/
+/**
+ * mpc52xx_get_irq - Get pending interrupt number hook function
+ *
+ * Called by the interrupt handler to determine what IRQ handler needs to be
+ * executed.
+ *
+ * Status of pending interrupts is determined by reading the encoded status
+ * register. The encoded status register has three fields; one for each of the
+ * types of interrupts defined by the controller - 'critical', 'main' and
+ * 'peripheral'. This function reads the status register and returns the IRQ
+ * number associated with the highest priority pending interrupt. 'Critical'
+ * interrupts have the highest priority, followed by 'main' interrupts, and
+ * then 'peripheral'.
+ *
+ * The mpc5200 interrupt controller can be configured to boost the priority
+ * of individual 'peripheral' interrupts. If this is the case then a special
+ * value will appear in either the crit or main fields indicating a high
+ * or medium priority peripheral irq has occurred.
+ *
+ * This function checks each of the 3 irq request fields and returns the
+ * first pending interrupt that it finds.
+ *
+ * This function also identifies a 4th type of interrupt; 'bestcomm'. Each
+ * bestcomm DMA task can raise the bestcomm peripheral interrupt. When this
+ * occurs, a task-specific IRQ# is decoded so that each task can have its
+ * own IRQ handler.
+ */
unsigned int mpc52xx_get_irq(void)
{
u32 status;
@@ -478,25 +589,21 @@ unsigned int mpc52xx_get_irq(void)
irq = (status >> 8) & 0x3;
if (irq == 2) /* high priority peripheral */
goto peripheral;
- irq |= (MPC52xx_IRQ_L1_CRIT << MPC52xx_IRQ_L1_OFFSET) &
- MPC52xx_IRQ_L1_MASK;
+ irq |= (MPC52xx_IRQ_L1_CRIT << MPC52xx_IRQ_L1_OFFSET);
} else if (status & 0x00200000) { /* main */
irq = (status >> 16) & 0x1f;
if (irq == 4) /* low priority peripheral */
goto peripheral;
- irq |= (MPC52xx_IRQ_L1_MAIN << MPC52xx_IRQ_L1_OFFSET) &
- MPC52xx_IRQ_L1_MASK;
+ irq |= (MPC52xx_IRQ_L1_MAIN << MPC52xx_IRQ_L1_OFFSET);
} else if (status & 0x20000000) { /* peripheral */
peripheral:
irq = (status >> 24) & 0x1f;
if (irq == 0) { /* bestcomm */
status = in_be32(&sdma->IntPend);
irq = ffs(status) - 1;
- irq |= (MPC52xx_IRQ_L1_SDMA << MPC52xx_IRQ_L1_OFFSET) &
- MPC52xx_IRQ_L1_MASK;
+ irq |= (MPC52xx_IRQ_L1_SDMA << MPC52xx_IRQ_L1_OFFSET);
} else {
- irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET) &
- MPC52xx_IRQ_L1_MASK;
+ irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET);
}
}
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.h b/arch/powerpc/platforms/52xx/mpc52xx_pic.h
deleted file mode 100644
index 1a26bcdb3049..000000000000
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Header file for Freescale MPC52xx Interrupt controller
- *
- * Copyright (C) 2004-2005 Sylvain Munaut <tnt@246tNt.com>
- * Copyright (C) 2003 MontaVista, Software, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __POWERPC_SYSDEV_MPC52xx_PIC_H__
-#define __POWERPC_SYSDEV_MPC52xx_PIC_H__
-
-#include <asm/types.h>
-
-
-/* HW IRQ mapping */
-#define MPC52xx_IRQ_L1_CRIT (0)
-#define MPC52xx_IRQ_L1_MAIN (1)
-#define MPC52xx_IRQ_L1_PERP (2)
-#define MPC52xx_IRQ_L1_SDMA (3)
-
-#define MPC52xx_IRQ_L1_OFFSET (6)
-#define MPC52xx_IRQ_L1_MASK (0x00c0)
-
-#define MPC52xx_IRQ_L2_OFFSET (0)
-#define MPC52xx_IRQ_L2_MASK (0x003f)
-
-#define MPC52xx_IRQ_HIGHTESTHWIRQ (0xd0)
-
-
-/* Interrupt controller Register set */
-struct mpc52xx_intr {
- u32 per_mask; /* INTR + 0x00 */
- u32 per_pri1; /* INTR + 0x04 */
- u32 per_pri2; /* INTR + 0x08 */
- u32 per_pri3; /* INTR + 0x0c */
- u32 ctrl; /* INTR + 0x10 */
- u32 main_mask; /* INTR + 0x14 */
- u32 main_pri1; /* INTR + 0x18 */
- u32 main_pri2; /* INTR + 0x1c */
- u32 reserved1; /* INTR + 0x20 */
- u32 enc_status; /* INTR + 0x24 */
- u32 crit_status; /* INTR + 0x28 */
- u32 main_status; /* INTR + 0x2c */
- u32 per_status; /* INTR + 0x30 */
- u32 reserved2; /* INTR + 0x34 */
- u32 per_error; /* INTR + 0x38 */
-};
-
-#endif /* __POWERPC_SYSDEV_MPC52xx_PIC_H__ */
-
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
index c72d3304387f..a55b0b6813ed 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
@@ -5,9 +5,6 @@
#include <asm/cacheflush.h>
#include <asm/mpc52xx.h>
-#include "mpc52xx_pic.h"
-
-
/* these are defined in mpc52xx_sleep.S, and only used here */
extern void mpc52xx_deep_sleep(void __iomem *sram, void __iomem *sdram_regs,
struct mpc52xx_cdm __iomem *, struct mpc52xx_intr __iomem*);
diff --git a/arch/powerpc/platforms/82xx/pq2.c b/arch/powerpc/platforms/82xx/pq2.c
index 1b75902fad64..9761a59f175f 100644
--- a/arch/powerpc/platforms/82xx/pq2.c
+++ b/arch/powerpc/platforms/82xx/pq2.c
@@ -53,7 +53,7 @@ static void __init pq2_pci_add_bridge(struct device_node *np)
if (of_address_to_resource(np, 0, &r) || r.end - r.start < 0x10b)
goto err;
- ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
hose = pcibios_alloc_controller(np);
if (!hose)
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index cb3054e1001d..f0798c09980f 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -1,6 +1,8 @@
#
# Makefile for the PowerPC 85xx linux kernel.
#
+obj-$(CONFIG_SMP) += smp.o
+
obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads.o
obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads.o
obj-$(CONFIG_MPC85xx_CDS) += mpc85xx_cds.o
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 613bf8c2e30d..a8301c8ad537 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -63,6 +63,7 @@ void __init mpc85xx_ds_pic_init(void)
struct device_node *cascade_node = NULL;
int cascade_irq;
#endif
+ unsigned long root = of_get_flat_dt_root();
np = of_find_node_by_type(NULL, "open-pic");
if (np == NULL) {
@@ -76,11 +77,19 @@ void __init mpc85xx_ds_pic_init(void)
return;
}
- mpic = mpic_alloc(np, r.start,
+ if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
+ mpic = mpic_alloc(np, r.start,
+ MPIC_PRIMARY |
+ MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS,
+ 0, 256, " OpenPIC ");
+ } else {
+ mpic = mpic_alloc(np, r.start,
MPIC_PRIMARY | MPIC_WANTS_RESET |
MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
+ }
+
BUG_ON(mpic == NULL);
of_node_put(np);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 2494c5155919..658a36fab3ab 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -231,7 +231,7 @@ static void __init mpc85xx_mds_setup_arch(void)
static int __init board_fixups(void)
{
- char phy_id[BUS_ID_SIZE];
+ char phy_id[20];
char *compstrs[2] = {"fsl,gianfar-mdio", "fsl,ucc-mdio"};
struct device_node *mdio;
struct resource res;
@@ -241,13 +241,15 @@ static int __init board_fixups(void)
mdio = of_find_compatible_node(NULL, NULL, compstrs[i]);
of_address_to_resource(mdio, 0, &res);
- snprintf(phy_id, BUS_ID_SIZE, "%x:%02x", res.start, 1);
+ snprintf(phy_id, sizeof(phy_id), "%llx:%02x",
+ (unsigned long long)res.start, 1);
phy_register_fixup_for_id(phy_id, mpc8568_fixup_125_clock);
phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups);
/* Register a workaround for errata */
- snprintf(phy_id, BUS_ID_SIZE, "%x:%02x", res.start, 7);
+ snprintf(phy_id, sizeof(phy_id), "%llx:%02x",
+ (unsigned long long)res.start, 7);
phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups);
of_node_put(mdio);
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
new file mode 100644
index 000000000000..d652c713f496
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -0,0 +1,104 @@
+/*
+ * Author: Andy Fleming <afleming@freescale.com>
+ * Kumar Gala <galak@kernel.crashing.org>
+ *
+ * Copyright 2006-2008 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+
+#include <asm/machdep.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/mpic.h>
+#include <asm/cacheflush.h>
+
+#include <sysdev/fsl_soc.h>
+
+extern volatile unsigned long __secondary_hold_acknowledge;
+extern void __early_start(void);
+
+#define BOOT_ENTRY_ADDR_UPPER 0
+#define BOOT_ENTRY_ADDR_LOWER 1
+#define BOOT_ENTRY_R3_UPPER 2
+#define BOOT_ENTRY_R3_LOWER 3
+#define BOOT_ENTRY_RESV 4
+#define BOOT_ENTRY_PIR 5
+#define BOOT_ENTRY_R6_UPPER 6
+#define BOOT_ENTRY_R6_LOWER 7
+#define NUM_BOOT_ENTRY 8
+#define SIZE_BOOT_ENTRY (NUM_BOOT_ENTRY * sizeof(u32))
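+
+/*
+ * Word offsets into the spin table entry that the boot loader leaves at
+ * the address given by each CPU's "cpu-release-addr" device tree property.
+ */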
+
+static void __init
+smp_85xx_kick_cpu(int nr)
+{
+ unsigned long flags;
+ const u64 *cpu_rel_addr;
+ __iomem u32 *bptr_vaddr;
+ struct device_node *np;
+ int n = 0;
+
+ WARN_ON (nr < 0 || nr >= NR_CPUS);
+
+ pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);
+
+ local_irq_save(flags);
+
+ np = of_get_cpu_node(nr, NULL);
+ cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
+
+ if (cpu_rel_addr == NULL) {
+ printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
+ return;
+ }
+
+ /* Map the spin table */
+ bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+
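+	/* Writing the PIR and the entry point address into the spin table
+	 * releases the secondary CPU; it then jumps to __early_start and
+	 * posts its number in __secondary_hold_acknowledge (polled below).
+	 */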
+ out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
+ out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
+
+ /* Wait a bit for the CPU to ack. */
+ while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
+ mdelay(1);
+
+ iounmap(bptr_vaddr);
+
+ local_irq_restore(flags);
+
+ pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
+}
+
+static void __init
+smp_85xx_setup_cpu(int cpu_nr)
+{
+ mpic_setup_this_cpu();
+
+ /* Clear any pending timer interrupts */
+ mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
+
+ /* Enable decrementer interrupt */
+ mtspr(SPRN_TCR, TCR_DIE);
+}
+
+struct smp_ops_t smp_85xx_ops = {
+ .message_pass = smp_mpic_message_pass,
+ .probe = smp_mpic_probe,
+ .kick_cpu = smp_85xx_kick_cpu,
+ .setup_cpu = smp_85xx_setup_cpu,
+};
+
+void __init
+mpc85xx_smp_init(void)
+{
+ smp_ops = &smp_85xx_ops;
+}
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 77dd797a2580..8e5693935975 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -34,6 +34,8 @@ config MPC8610_HPCD
config GEF_SBC610
bool "GE Fanuc SBC610"
select DEFAULT_UIMAGE
+ select GENERIC_GPIO
+ select ARCH_REQUIRE_GPIOLIB
select HAS_RAPIDIO
help
This option enables support for GE Fanuc's SBC610.
diff --git a/arch/powerpc/platforms/86xx/Makefile b/arch/powerpc/platforms/86xx/Makefile
index 4a56ff619afd..31e540c2ebbc 100644
--- a/arch/powerpc/platforms/86xx/Makefile
+++ b/arch/powerpc/platforms/86xx/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_SMP) += mpc86xx_smp.o
obj-$(CONFIG_MPC8641_HPCN) += mpc86xx_hpcn.o
obj-$(CONFIG_SBC8641D) += sbc8641d.o
obj-$(CONFIG_MPC8610_HPCD) += mpc8610_hpcd.o
-obj-$(CONFIG_GEF_SBC610) += gef_sbc610.o gef_pic.o
+gef-gpio-$(CONFIG_GPIOLIB) += gef_gpio.o
+obj-$(CONFIG_GEF_SBC610) += gef_sbc610.o gef_pic.o $(gef-gpio-y)
diff --git a/arch/powerpc/platforms/86xx/gef_gpio.c b/arch/powerpc/platforms/86xx/gef_gpio.c
new file mode 100644
index 000000000000..85b2800f4cb7
--- /dev/null
+++ b/arch/powerpc/platforms/86xx/gef_gpio.c
@@ -0,0 +1,143 @@
+/*
+ * Driver for GE Fanuc's FPGA based GPIO pins
+ *
+ * Author: Martyn Welch <martyn.welch@gefanuc.com>
+ *
+ * 2008 (c) GE Fanuc Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/* TODO
+ *
+ * Configuration of output modes (totem-pole/open-drain)
+ * Interrupt configuration - interrupts are always generated; the FPGA relies
+ * on the I/O interrupt controller's mask to stop them propagating
+ */
+
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+
+#define GEF_GPIO_DIRECT 0x00
+#define GEF_GPIO_IN 0x04
+#define GEF_GPIO_OUT 0x08
+#define GEF_GPIO_TRIG 0x0C
+#define GEF_GPIO_POLAR_A 0x10
+#define GEF_GPIO_POLAR_B 0x14
+#define GEF_GPIO_INT_STAT 0x18
+#define GEF_GPIO_OVERRUN 0x1C
+#define GEF_GPIO_MODE 0x20
+
+#define NUM_GPIO 19
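+
+/* Register offsets within the FPGA GPIO block; a set bit in GEF_GPIO_DIRECT
+ * configures the corresponding pin as an input (see the direction helpers
+ * below).
+ */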
+
+static void _gef_gpio_set(void __iomem *reg, unsigned int offset, int value)
+{
+ unsigned int data;
+
+ data = ioread32be(reg);
+ /* value: 0=low; 1=high */
+ if (value & 0x1)
+ data = data | (0x1 << offset);
+ else
+ data = data & ~(0x1 << offset);
+
+ iowrite32be(data, reg);
+}
+
+
+static int gef_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned int data;
+ struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
+
+ data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
+ data = data | (0x1 << offset);
+ iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
+
+ return 0;
+}
+
+static int gef_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
+{
+ unsigned int data;
+ struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
+
+	/* Set the output value before switching the direction to output */
+ _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
+
+ data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
+ data = data & ~(0x1 << offset);
+ iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
+
+ return 0;
+}
+
+static int gef_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned int data;
+ int state = 0;
+ struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
+
+ data = ioread32be(mmchip->regs + GEF_GPIO_IN);
+ state = (int)((data >> offset) & 0x1);
+
+ return state;
+}
+
+static void gef_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
+
+ _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
+}
+
+static int __init gef_gpio_init(void)
+{
+ struct device_node *np;
+
+ for_each_compatible_node(np, NULL, "gef,sbc610-gpio") {
+ int retval;
+ struct of_mm_gpio_chip *gef_gpio_chip;
+
+ pr_debug("%s: Initialising GEF GPIO\n", np->full_name);
+
+ /* Allocate chip structure */
+ gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL);
+ if (!gef_gpio_chip) {
+ pr_err("%s: Unable to allocate structure\n",
+ np->full_name);
+ continue;
+ }
+
+ /* Setup pointers to chip functions */
+ gef_gpio_chip->of_gc.gpio_cells = 2;
+ gef_gpio_chip->of_gc.gc.ngpio = NUM_GPIO;
+ gef_gpio_chip->of_gc.gc.direction_input = gef_gpio_dir_in;
+ gef_gpio_chip->of_gc.gc.direction_output = gef_gpio_dir_out;
+ gef_gpio_chip->of_gc.gc.get = gef_gpio_get;
+ gef_gpio_chip->of_gc.gc.set = gef_gpio_set;
+
+ /* This function adds a memory mapped GPIO chip */
+ retval = of_mm_gpiochip_add(np, gef_gpio_chip);
+ if (retval) {
+ kfree(gef_gpio_chip);
+ pr_err("%s: Unable to add GPIO\n", np->full_name);
+ }
+ }
+
+ return 0;
+};
+arch_initcall(gef_gpio_init);
+
+MODULE_DESCRIPTION("GE Fanuc I/O FPGA GPIO driver");
+MODULE_AUTHOR("Martyn Welch <martyn.welch@gefanuc.com>");
+MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 548efa55c8fe..3d0c776f888d 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -195,16 +195,24 @@ config SPE
config PPC_STD_MMU
bool
- depends on 6xx || POWER3 || POWER4 || PPC64
+ depends on 6xx || PPC64
default y
config PPC_STD_MMU_32
def_bool y
depends on PPC_STD_MMU && PPC32
+config PPC_STD_MMU_64
+ def_bool y
+ depends on PPC_STD_MMU && PPC64
+
+config PPC_MMU_NOHASH
+ def_bool y
+ depends on !PPC_STD_MMU
+
config PPC_MM_SLICES
bool
- default y if HUGETLB_PAGE || PPC_64K_PAGES
+ default y if HUGETLB_PAGE || (PPC_STD_MMU_64 && PPC_64K_PAGES)
default n
config VIRT_CPU_ACCOUNTING
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index c14d7d8d96c8..5cc3279559a4 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -2,13 +2,18 @@ config PPC_CELL
bool
default n
-config PPC_CELL_NATIVE
+config PPC_CELL_COMMON
bool
select PPC_CELL
select PPC_DCR_MMIO
- select PPC_OF_PLATFORM_PCI
select PPC_INDIRECT_IO
select PPC_NATIVE
+ select PPC_RTAS
+
+config PPC_CELL_NATIVE
+ bool
+ select PPC_CELL_COMMON
+ select PPC_OF_PLATFORM_PCI
select MPIC
select IBM_NEW_EMAC_EMAC4
select IBM_NEW_EMAC_RGMII
@@ -20,7 +25,6 @@ config PPC_IBM_CELL_BLADE
bool "IBM Cell Blade"
depends on PPC_MULTIPLATFORM && PPC64
select PPC_CELL_NATIVE
- select PPC_RTAS
select MMIO_NVRAM
select PPC_UDBG_16550
select UDBG_RTAS_CONSOLE
@@ -28,16 +32,17 @@ config PPC_IBM_CELL_BLADE
config PPC_CELLEB
bool "Toshiba's Cell Reference Set 'Celleb' Architecture"
depends on PPC_MULTIPLATFORM && PPC64
- select PPC_CELL
select PPC_CELL_NATIVE
- select PPC_RTAS
- select PPC_INDIRECT_IO
- select PPC_OF_PLATFORM_PCI
select HAS_TXX9_SERIAL
select PPC_UDBG_BEAT
select USB_OHCI_BIG_ENDIAN_MMIO
select USB_EHCI_BIG_ENDIAN_MMIO
+config PPC_CELL_QPACE
+ bool "IBM Cell - QPACE"
+ depends on PPC_MULTIPLATFORM && PPC64
+ select PPC_CELL_COMMON
+
menu "Cell Broadband Engine options"
depends on PPC_CELL
@@ -102,7 +107,7 @@ config PPC_IBM_CELL_POWERBUTTON
config CBE_THERM
tristate "CBE thermal support"
default m
- depends on CBE_RAS
+ depends on CBE_RAS && SPU_BASE
config CBE_CPUFREQ
tristate "CBE frequency scaling"
@@ -136,5 +141,5 @@ endmenu
config OPROFILE_CELL
def_bool y
- depends on PPC_CELL_NATIVE && (OPROFILE = m || OPROFILE = y)
+ depends on PPC_CELL_NATIVE && (OPROFILE = m || OPROFILE = y) && SPU_BASE
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 7fd830872c43..43eccb270301 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,7 +1,7 @@
-obj-$(CONFIG_PPC_CELL_NATIVE) += interrupt.o iommu.o setup.o \
- cbe_regs.o spider-pic.o \
- pervasive.o pmu.o io-workarounds.o \
- spider-pci.o
+obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o
+
+obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \
+ pmu.o io-workarounds.o spider-pci.o
obj-$(CONFIG_CBE_RAS) += ras.o
obj-$(CONFIG_CBE_THERM) += cbe_thermal.o
@@ -14,13 +14,12 @@ obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o
ifeq ($(CONFIG_SMP),y)
obj-$(CONFIG_PPC_CELL_NATIVE) += smp.o
+obj-$(CONFIG_PPC_CELL_QPACE) += smp.o
endif
# needed only when building loadable spufs.ko
-spu-priv1-$(CONFIG_PPC_CELL_NATIVE) += spu_priv1_mmio.o
-
-spu-manage-$(CONFIG_PPC_CELLEB) += spu_manage.o
-spu-manage-$(CONFIG_PPC_CELL_NATIVE) += spu_manage.o
+spu-priv1-$(CONFIG_PPC_CELL_COMMON) += spu_priv1_mmio.o
+spu-manage-$(CONFIG_PPC_CELL_COMMON) += spu_manage.o
obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
spu_notify.o \
@@ -31,6 +30,8 @@ obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
obj-$(CONFIG_PCI_MSI) += axon_msi.o
+# qpace setup
+obj-$(CONFIG_PPC_CELL_QPACE) += qpace_setup.o
# celleb stuff
ifeq ($(CONFIG_PPC_CELLEB),y)
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c
index b11cb30decb2..07c234f6b2b6 100644
--- a/arch/powerpc/platforms/cell/celleb_setup.c
+++ b/arch/powerpc/platforms/cell/celleb_setup.c
@@ -45,7 +45,6 @@
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/kexec.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
@@ -226,9 +225,6 @@ define_machine(celleb_beat) {
.pci_setup_phb = celleb_setup_phb,
#ifdef CONFIG_KEXEC
.kexec_cpu_down = beat_kexec_cpu_down,
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
#endif
};
@@ -248,9 +244,4 @@ define_machine(celleb_native) {
.pci_probe_mode = celleb_pci_probe_mode,
.pci_setup_phb = celleb_setup_phb,
.init_IRQ = celleb_init_IRQ_native,
-#ifdef CONFIG_KEXEC
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
-#endif
};
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 3168272ab0d7..86db4dd170a0 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -1053,10 +1053,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
}
/* We must have dma-ranges properties for fixed mapping to work */
- for (np = NULL; (np = of_find_all_nodes(np));) {
- if (of_find_property(np, "dma-ranges", NULL))
- break;
- }
+ np = of_find_node_with_property(NULL, "dma-ranges");
of_node_put(np);
if (!np) {
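of_find_node_with_property() is assumed here to encapsulate exactly the open-coded walk it replaces; a sketch of that behaviour (reference handling simplified):

static struct device_node *find_node_with_property_sketch(
		struct device_node *from, const char *prop_name)
{
	struct device_node *np;

	/* Walk every node in the device tree and stop at the first one
	 * carrying the requested property.  The returned node holds a
	 * reference that the caller must drop with of_node_put().
	 */
	for (np = from; (np = of_find_all_nodes(np)) != NULL;)
		if (of_find_property(np, prop_name, NULL))
			break;
	return np;
}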
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c
new file mode 100644
index 000000000000..be84e6a16b30
--- /dev/null
+++ b/arch/powerpc/platforms/cell/qpace_setup.c
@@ -0,0 +1,152 @@
+/*
+ * linux/arch/powerpc/platforms/cell/qpace_setup.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Adapted from 'alpha' version by Gary Thomas
+ * Modified by Cort Dougan (cort@cs.nmt.edu)
+ * Modified by PPC64 Team, IBM Corp
+ * Modified by Cell Team, IBM Deutschland Entwicklung GmbH
+ * Modified by Benjamin Krill <ben@codiert.org>, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/console.h>
+#include <linux/of_platform.h>
+
+#include <asm/mmu.h>
+#include <asm/processor.h>
+#include <asm/io.h>
+#include <asm/kexec.h>
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/rtas.h>
+#include <asm/dma.h>
+#include <asm/machdep.h>
+#include <asm/time.h>
+#include <asm/cputable.h>
+#include <asm/irq.h>
+#include <asm/spu.h>
+#include <asm/spu_priv1.h>
+#include <asm/udbg.h>
+#include <asm/cell-regs.h>
+
+#include "interrupt.h"
+#include "pervasive.h"
+#include "ras.h"
+#include "io-workarounds.h"
+
+static void qpace_show_cpuinfo(struct seq_file *m)
+{
+ struct device_node *root;
+ const char *model = "";
+
+ root = of_find_node_by_path("/");
+ if (root)
+ model = of_get_property(root, "model", NULL);
+ seq_printf(m, "machine\t\t: CHRP %s\n", model);
+ of_node_put(root);
+}
+
+static void qpace_progress(char *s, unsigned short hex)
+{
+ printk("*** %04x : %s\n", hex, s ? s : "");
+}
+
+static int __init qpace_publish_devices(void)
+{
+ int node;
+
+ /* Publish OF platform devices for southbridge IOs */
+ of_platform_bus_probe(NULL, NULL, NULL);
+
+ /* There is no device for the MIC memory controller, so we create
+ * a platform device for it to attach the EDAC driver to.
+ */
+ for_each_online_node(node) {
+ if (cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(node)) == NULL)
+ continue;
+ platform_device_register_simple("cbe-mic", node, NULL, 0);
+ }
+
+ return 0;
+}
+machine_subsys_initcall(qpace, qpace_publish_devices);
+
+static int qpace_notify(struct device *dev)
+{
+ /* set dma_ops for of_platform bus */
+ if (dev->bus && dev->bus->name
+ && !strcmp(dev->bus->name, "of_platform"))
+ set_dma_ops(dev, &dma_direct_ops);
+
+ return 0;
+}
+
+static void __init qpace_setup_arch(void)
+{
+#ifdef CONFIG_SPU_BASE
+ spu_priv1_ops = &spu_priv1_mmio_ops;
+ spu_management_ops = &spu_management_of_ops;
+#endif
+
+ cbe_regs_init();
+
+#ifdef CONFIG_CBE_RAS
+ cbe_ras_init();
+#endif
+
+#ifdef CONFIG_SMP
+ smp_init_cell();
+#endif
+
+ /* init to some ~sane value until calibrate_delay() runs */
+ loops_per_jiffy = 50000000;
+
+ cbe_pervasive_init();
+#ifdef CONFIG_DUMMY_CONSOLE
+ conswitchp = &dummy_con;
+#endif
+
+ /* set notifier function */
+ platform_notify = &qpace_notify;
+}
+
+static int __init qpace_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (!of_flat_dt_is_compatible(root, "IBM,QPACE"))
+ return 0;
+
+ hpte_init_native();
+
+ return 1;
+}
+
+define_machine(qpace) {
+ .name = "QPACE",
+ .probe = qpace_probe,
+ .setup_arch = qpace_setup_arch,
+ .show_cpuinfo = qpace_show_cpuinfo,
+ .restart = rtas_restart,
+ .power_off = rtas_power_off,
+ .halt = rtas_halt,
+ .get_boot_time = rtas_get_boot_time,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = qpace_progress,
+ .init_IRQ = iic_init_IRQ,
+#ifdef CONFIG_KEXEC
+ .machine_kexec = default_machine_kexec,
+ .machine_kexec_prepare = default_machine_kexec_prepare,
+ .machine_crash_shutdown = default_machine_crash_shutdown,
+#endif
+};
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index ab721b50fbba..59305369f6b2 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -35,7 +35,6 @@
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/kexec.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -289,9 +288,4 @@ define_machine(cell) {
.progress = cell_progress,
.init_IRQ = cell_init_irq,
.pci_setup_phb = cell_setup_phb,
-#ifdef CONFIG_KEXEC
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
-#endif
};
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 1b26071a86ca..7106b63d401b 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -273,12 +273,10 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return VM_FAULT_NOPAGE;
if (ctx->state == SPU_STATE_SAVED) {
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- & ~_PAGE_NO_CACHE);
+ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
} else {
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE);
+ vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
}
vm_insert_pfn(vma, address, pfn);
@@ -338,8 +336,7 @@ static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE);
+ vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
vma->vm_ops = &spufs_mem_mmap_vmops;
return 0;
@@ -452,8 +449,7 @@ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_cntl_mmap_vmops;
return 0;
@@ -1155,8 +1151,7 @@ static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_signal1_mmap_vmops;
return 0;
@@ -1292,8 +1287,7 @@ static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_signal2_mmap_vmops;
return 0;
@@ -1414,8 +1408,7 @@ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_mss_mmap_vmops;
return 0;
@@ -1476,8 +1469,7 @@ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_psmap_mmap_vmops;
return 0;
@@ -1536,8 +1528,7 @@ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
- | _PAGE_NO_CACHE | _PAGE_GUARDED);
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &spufs_mfc_mmap_vmops;
return 0;
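The spufs conversions above replace open-coded _PAGE_NO_CACHE/_PAGE_GUARDED manipulation with pgprot_* helpers. Judging only from the lines they replace, the helpers behave roughly like the illustrative stand-ins below (prefixed names; the real definitions live in the powerpc pgtable headers):

/* Guarded + no-cache gives strongly ordered I/O mappings, no-cache alone
 * gives write-combining, and clearing no-cache restores a cacheable mapping.
 */
#define sketch_pgprot_noncached(prot) \
	__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define sketch_pgprot_noncached_wc(prot) \
	__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)
#define sketch_pgprot_cached(prot) \
	__pgprot(pgprot_val(prot) & ~_PAGE_NO_CACHE)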
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index d3cde6b9d2df..f6b0c519d5a2 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -141,6 +141,7 @@ hydra_init(void)
of_node_put(np);
return 0;
}
+ of_node_put(np);
Hydra = ioremap(r.start, r.end-r.start);
printk("Hydra Mac I/O at %llx\n", (unsigned long long)r.start);
printk("Hydra Feature_Control was %x",
@@ -198,7 +199,7 @@ static void __init setup_peg2(struct pci_controller *hose, struct device_node *d
printk ("RTAS supporting Pegasos OF not found, please upgrade"
" your firmware\n");
}
- ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
/* keep the reference to the root node */
}
diff --git a/arch/powerpc/platforms/embedded6xx/c2k.c b/arch/powerpc/platforms/embedded6xx/c2k.c
index 32ba0fa0ad03..8cab5731850f 100644
--- a/arch/powerpc/platforms/embedded6xx/c2k.c
+++ b/arch/powerpc/platforms/embedded6xx/c2k.c
@@ -20,7 +20,6 @@
#include <linux/seq_file.h>
#include <linux/time.h>
#include <linux/of.h>
-#include <linux/kexec.h>
#include <asm/machdep.h>
#include <asm/prom.h>
@@ -147,9 +146,4 @@ define_machine(c2k) {
.get_irq = mv64x60_get_irq,
.restart = c2k_restart,
.calibrate_decr = generic_calibrate_decr,
-#ifdef CONFIG_KEXEC
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
-#endif
};
diff --git a/arch/powerpc/platforms/embedded6xx/prpmc2800.c b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
index 4c485e984236..670035f49a69 100644
--- a/arch/powerpc/platforms/embedded6xx/prpmc2800.c
+++ b/arch/powerpc/platforms/embedded6xx/prpmc2800.c
@@ -19,7 +19,6 @@
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/time.h>
-#include <asm/kexec.h>
#include <mm/mmu_decl.h>
@@ -155,9 +154,4 @@ define_machine(prpmc2800){
.get_irq = mv64x60_get_irq,
.restart = prpmc2800_restart,
.calibrate_decr = generic_calibrate_decr,
-#ifdef CONFIG_KEXEC
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
-#endif
};
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
index 45ffd8e542f4..ed3753d8c109 100644
--- a/arch/powerpc/platforms/iseries/Kconfig
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -9,6 +9,7 @@ menu "iSeries device drivers"
config VIODASD
tristate "iSeries Virtual I/O disk support"
+ depends on BLOCK
help
If you are running on an iSeries system and you want to use
virtual disks created and managed by OS/400, say Y.
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index d4c61c3c9669..bfd60e4accee 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -50,7 +50,6 @@
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/io.h>
-#include <asm/kexec.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
@@ -335,9 +334,4 @@ define_machine(maple) {
.calibrate_decr = generic_calibrate_decr,
.progress = maple_progress,
.power_save = power4_idle,
-#ifdef CONFIG_KEXEC
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
-#endif
};
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index 792d3ce8112e..65c585b8b00d 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -310,7 +310,7 @@ static int pmu_set_cpu_speed(int low_speed)
_set_L3CR(save_l3cr);
/* Restore userland MMU context */
- set_context(current->active_mm->context.id, current->active_mm->pgd);
+ switch_mmu_context(NULL, current->active_mm);
#ifdef DEBUG_FREQ
printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index bcf50d7056e9..54b7b76ed4f0 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -729,7 +729,7 @@ static void __init setup_bandit(struct pci_controller *hose,
static int __init setup_uninorth(struct pci_controller *hose,
struct resource *addr)
{
- ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
has_uninorth = 1;
hose->ops = &macrisc_pci_ops;
hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000);
@@ -996,7 +996,7 @@ void __init pmac_pci_init(void)
struct device_node *np, *root;
struct device_node *ht = NULL;
- ppc_pci_flags = PPC_PCI_CAN_SKIP_ISA_ALIGN;
+ ppc_pci_set_flags(PPC_PCI_CAN_SKIP_ISA_ALIGN);
root = of_find_node_by_path("/");
if (root == NULL) {
@@ -1055,7 +1055,7 @@ void __init pmac_pci_init(void)
* some offset between bus number and domains for now when we
* assign all busses should help for now
*/
- if (ppc_pci_flags & PPC_PCI_REASSIGN_ALL_BUS)
+ if (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS))
pcibios_assign_bus_offset = 0x10;
#endif
}
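Several hunks in this patch convert direct writes to ppc_pci_flags into accessor calls. A plausible sketch of those accessors, assuming they are thin wrappers around the same global flags word:

static inline void ppc_pci_set_flags_sketch(int flags)
{
	ppc_pci_flags = flags;		/* replace the whole word */
}

static inline void ppc_pci_add_flags_sketch(int flags)
{
	ppc_pci_flags |= flags;		/* OR in additional flags */
}

static inline int ppc_pci_has_flag_sketch(int flag)
{
	return ppc_pci_flags & flag;	/* test a single flag */
}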
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 82c14d203d8b..9b78f5300c24 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -60,7 +60,6 @@
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/io.h>
-#include <asm/kexec.h>
#include <asm/pci-bridge.h>
#include <asm/ohare.h>
#include <asm/mediabay.h>
@@ -310,9 +309,7 @@ static void __init pmac_setup_arch(void)
}
/* See if newworld or oldworld */
- for (ic = NULL; (ic = of_find_all_nodes(ic)) != NULL; )
- if (of_get_property(ic, "interrupt-controller", NULL))
- break;
+ ic = of_find_node_with_property(NULL, "interrupt-controller");
if (ic) {
pmac_newworld = 1;
of_node_put(ic);
@@ -740,11 +737,6 @@ define_machine(powermac) {
.pci_probe_mode = pmac_pci_probe_mode,
.power_save = power4_idle,
.enable_pmcs = power4_enable_pmcs,
-#ifdef CONFIG_KEXEC
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
-#endif
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC32
.pcibios_enable_device_hook = pmac_pci_enable_device_hook,
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
index adee28da353f..1c2802fabd57 100644
--- a/arch/powerpc/platforms/powermac/sleep.S
+++ b/arch/powerpc/platforms/powermac/sleep.S
@@ -17,6 +17,7 @@
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
+#include <asm/mmu.h>
#define MAGIC 0x4c617273 /* 'Lars' */
@@ -323,7 +324,7 @@ grackle_wake_up:
lwz r4,SL_IBAT3+4(r1)
mtibatl 3,r4
-BEGIN_FTR_SECTION
+BEGIN_MMU_FTR_SECTION
li r4,0
mtspr SPRN_DBAT4U,r4
mtspr SPRN_DBAT4L,r4
@@ -341,7 +342,7 @@ BEGIN_FTR_SECTION
mtspr SPRN_IBAT6L,r4
mtspr SPRN_IBAT7U,r4
mtspr SPRN_IBAT7L,r4
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
/* Flush all TLBs */
lis r4,0x1000
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 40f72c2a4699..6b0711c15eca 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -739,7 +739,7 @@ static void __init smp_core99_setup(int ncpus)
/* XXX should get this from reg properties */
for (i = 1; i < ncpus; ++i)
- smp_hw_index[i] = i;
+ set_hard_smp_processor_id(i, i);
}
#endif
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index ffdd8e963fbd..dbc124e05646 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -314,11 +314,17 @@ static int __init ps3_setup_vuart_device(enum ps3_match_id match_id,
result = ps3_system_bus_device_register(&p->dev);
- if (result)
+ if (result) {
pr_debug("%s:%d ps3_system_bus_device_register failed\n",
__func__, __LINE__);
-
+ goto fail_device_register;
+ }
pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ return 0;
+
+fail_device_register:
+ kfree(p);
+ pr_debug(" <- %s:%d fail\n", __func__, __LINE__);
return result;
}
@@ -463,11 +469,17 @@ static int __init ps3_register_sound_devices(void)
result = ps3_system_bus_device_register(&p->dev);
- if (result)
+ if (result) {
pr_debug("%s:%d ps3_system_bus_device_register failed\n",
__func__, __LINE__);
-
+ goto fail_device_register;
+ }
pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ return 0;
+
+fail_device_register:
+ kfree(p);
+ pr_debug(" <- %s:%d failed\n", __func__, __LINE__);
return result;
}
@@ -485,17 +497,24 @@ static int __init ps3_register_graphics_devices(void)
if (!p)
return -ENOMEM;
- p->dev.match_id = PS3_MATCH_ID_GRAPHICS;
- p->dev.match_sub_id = PS3_MATCH_SUB_ID_FB;
+ p->dev.match_id = PS3_MATCH_ID_GPU;
+ p->dev.match_sub_id = PS3_MATCH_SUB_ID_GPU_FB;
p->dev.dev_type = PS3_DEVICE_TYPE_IOC0;
result = ps3_system_bus_device_register(&p->dev);
- if (result)
+ if (result) {
pr_debug("%s:%d ps3_system_bus_device_register failed\n",
__func__, __LINE__);
+ goto fail_device_register;
+ }
pr_debug(" <- %s:%d\n", __func__, __LINE__);
+ return 0;
+
+fail_device_register:
+ kfree(p);
+ pr_debug(" <- %s:%d failed\n", __func__, __LINE__);
return result;
}
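All three ps3 device-init fixes above have the same shape: the earlier allocation was leaked when ps3_system_bus_device_register() failed. The generic pattern, sketched with illustrative names:

static int register_with_cleanup_sketch(void)
{
	struct layout *p;			/* illustrative wrapper type */
	int result;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	/* ... fill in p->dev ... */

	result = ps3_system_bus_device_register(&p->dev);
	if (result)
		goto fail_device_register;
	return 0;

fail_device_register:
	kfree(p);				/* undo the allocation on error */
	return result;
}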
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 3a58ffabccd9..a4d49dd9e8a9 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -649,7 +649,7 @@ static int dma_sb_region_create(struct ps3_dma_region *r)
{
int result;
- pr_info(" -> %s:%d:\n", __func__, __LINE__);
+ DBG(" -> %s:%d:\n", __func__, __LINE__);
BUG_ON(!r);
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 77bc330263c4..35f3e85cf60e 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -23,7 +23,6 @@
#include <linux/fs.h>
#include <linux/root_dev.h>
#include <linux/console.h>
-#include <linux/kexec.h>
#include <linux/bootmem.h>
#include <asm/machdep.h>
@@ -42,6 +41,10 @@
#define DBG pr_debug
#endif
+/* mutex synchronizing GPU accesses and video mode changes */
+DEFINE_MUTEX(ps3_gpu_mutex);
+EXPORT_SYMBOL_GPL(ps3_gpu_mutex);
+
#if !defined(CONFIG_SMP)
static void smp_send_stop(void) {}
#endif
@@ -277,8 +280,5 @@ define_machine(ps3) {
.halt = ps3_halt,
#if defined(CONFIG_KEXEC)
.kexec_cpu_down = ps3_kexec_cpu_down,
- .machine_kexec = default_machine_kexec,
- .machine_kexec_prepare = default_machine_kexec_prepare,
- .machine_crash_shutdown = default_machine_crash_shutdown,
#endif
};
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 661e9f77ebf6..ee0d22911621 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -31,7 +31,7 @@
#include "platform.h"
static struct device ps3_system_bus = {
- .bus_id = "ps3_system",
+ .init_name = "ps3_system",
};
/* FIXME: need device usage counters! */
@@ -175,7 +175,7 @@ int ps3_open_hv_device(struct ps3_system_bus_device *dev)
return ps3_open_hv_device_sb(dev);
case PS3_MATCH_ID_SOUND:
- case PS3_MATCH_ID_GRAPHICS:
+ case PS3_MATCH_ID_GPU:
return ps3_open_hv_device_gpu(dev);
case PS3_MATCH_ID_AV_SETTINGS:
@@ -213,7 +213,7 @@ int ps3_close_hv_device(struct ps3_system_bus_device *dev)
return ps3_close_hv_device_sb(dev);
case PS3_MATCH_ID_SOUND:
- case PS3_MATCH_ID_GRAPHICS:
+ case PS3_MATCH_ID_GPU:
return ps3_close_hv_device_gpu(dev);
case PS3_MATCH_ID_AV_SETTINGS:
@@ -356,12 +356,12 @@ static int ps3_system_bus_match(struct device *_dev,
if (result)
pr_info("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): match\n",
__func__, __LINE__,
- dev->match_id, dev->match_sub_id, dev->core.bus_id,
+ dev->match_id, dev->match_sub_id, dev_name(&dev->core),
drv->match_id, drv->match_sub_id, drv->core.name);
else
pr_debug("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): miss\n",
__func__, __LINE__,
- dev->match_id, dev->match_sub_id, dev->core.bus_id,
+ dev->match_id, dev->match_sub_id, dev_name(&dev->core),
drv->match_id, drv->match_sub_id, drv->core.name);
return result;
@@ -383,9 +383,9 @@ static int ps3_system_bus_probe(struct device *_dev)
result = drv->probe(dev);
else
pr_debug("%s:%d: %s no probe method\n", __func__, __LINE__,
- dev->core.bus_id);
+ dev_name(&dev->core));
- pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev->core.bus_id);
+ pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core));
return result;
}
@@ -407,7 +407,7 @@ static int ps3_system_bus_remove(struct device *_dev)
dev_dbg(&dev->core, "%s:%d %s: no remove method\n",
__func__, __LINE__, drv->core.name);
- pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev->core.bus_id);
+ pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core));
return result;
}
@@ -432,7 +432,7 @@ static void ps3_system_bus_shutdown(struct device *_dev)
BUG_ON(!drv);
dev_dbg(&dev->core, "%s:%d: %s -> %s\n", __func__, __LINE__,
- dev->core.bus_id, drv->core.name);
+ dev_name(&dev->core), drv->core.name);
if (drv->shutdown)
drv->shutdown(dev);
@@ -453,7 +453,8 @@ static int ps3_system_bus_uevent(struct device *_dev, struct kobj_uevent_env *en
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
- if (add_uevent_var(env, "MODALIAS=ps3:%d", dev->match_id))
+ if (add_uevent_var(env, "MODALIAS=ps3:%d:%d", dev->match_id,
+ dev->match_sub_id))
return -ENOMEM;
return 0;
}
@@ -462,7 +463,8 @@ static ssize_t modalias_show(struct device *_dev, struct device_attribute *a,
char *buf)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
- int len = snprintf(buf, PAGE_SIZE, "ps3:%d\n", dev->match_id);
+ int len = snprintf(buf, PAGE_SIZE, "ps3:%d:%d\n", dev->match_id,
+ dev->match_sub_id);
return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
@@ -742,22 +744,18 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
switch (dev->dev_type) {
case PS3_DEVICE_TYPE_IOC0:
dev->core.archdata.dma_ops = &ps3_ioc0_dma_ops;
- snprintf(dev->core.bus_id, sizeof(dev->core.bus_id),
- "ioc0_%02x", ++dev_ioc0_count);
+ dev_set_name(&dev->core, "ioc0_%02x", ++dev_ioc0_count);
break;
case PS3_DEVICE_TYPE_SB:
dev->core.archdata.dma_ops = &ps3_sb_dma_ops;
- snprintf(dev->core.bus_id, sizeof(dev->core.bus_id),
- "sb_%02x", ++dev_sb_count);
+ dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count);
break;
case PS3_DEVICE_TYPE_VUART:
- snprintf(dev->core.bus_id, sizeof(dev->core.bus_id),
- "vuart_%02x", ++dev_vuart_count);
+ dev_set_name(&dev->core, "vuart_%02x", ++dev_vuart_count);
break;
case PS3_DEVICE_TYPE_LPM:
- snprintf(dev->core.bus_id, sizeof(dev->core.bus_id),
- "lpm_%02x", ++dev_lpm_count);
+ dev_set_name(&dev->core, "lpm_%02x", ++dev_lpm_count);
break;
default:
BUG();
@@ -766,7 +764,7 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
dev->core.archdata.of_node = NULL;
set_dev_node(&dev->core, 0);
- pr_debug("%s:%d add %s\n", __func__, __LINE__, dev->core.bus_id);
+ pr_debug("%s:%d add %s\n", __func__, __LINE__, dev_name(&dev->core));
result = device_register(&dev->core);
return result;
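With struct device losing its fixed-size bus_id array, the ps3 bus code switches to dev_set_name()/dev_name(), and the modalias string gains a second field so matching can key on the sub-id as well. A short sketch (numbers and the function name are illustrative):

static int sketch_name_and_uevent(struct ps3_system_bus_device *dev,
				  struct kobj_uevent_env *env)
{
	/* Name the device; the string now lives inside struct device. */
	dev_set_name(&dev->core, "sb_%02x", 1);
	pr_debug("registered %s\n", dev_name(&dev->core));

	/* The uevent reports both ids, e.g. MODALIAS=ps3:5:0, matching the
	 * sysfs modalias file, so module autoloading can use the sub-id.
	 */
	if (add_uevent_var(env, "MODALIAS=ps3:%d:%d",
			   dev->match_id, dev->match_sub_id))
		return -ENOMEM;
	return 0;
}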
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 97619fd51e39..ddc2a307cd50 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -54,7 +54,7 @@ config PPC_SMLPAR
config CMM
tristate "Collaborative memory management"
- depends on PPC_SMLPAR
+ depends on PPC_SMLPAR && !CRASH_DUMP
default y
help
Select this option, if you want to enable the kernel interface
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 5cd4d2761620..6567439fe78d 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -28,6 +28,7 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/oom.h>
+#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/stringify.h>
#include <linux/swap.h>
@@ -384,6 +385,26 @@ static void cmm_unregister_sysfs(struct sys_device *sysdev)
}
/**
+ * cmm_reboot_notifier - Make sure pages are not still marked as "loaned"
+ *
+ */
+static int cmm_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+{
+ if (action == SYS_RESTART) {
+ if (cmm_thread_ptr)
+ kthread_stop(cmm_thread_ptr);
+ cmm_thread_ptr = NULL;
+ cmm_free_pages(loaned_pages);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cmm_reboot_nb = {
+ .notifier_call = cmm_reboot_notifier,
+};
+
+/**
* cmm_init - Module initialization
*
* Return value:
@@ -399,9 +420,12 @@ static int cmm_init(void)
if ((rc = register_oom_notifier(&cmm_oom_nb)) < 0)
return rc;
- if ((rc = cmm_sysfs_register(&cmm_sysdev)))
+ if ((rc = register_reboot_notifier(&cmm_reboot_nb)))
goto out_oom_notifier;
+ if ((rc = cmm_sysfs_register(&cmm_sysdev)))
+ goto out_reboot_notifier;
+
if (cmm_disabled)
return rc;
@@ -415,6 +439,8 @@ static int cmm_init(void)
out_unregister_sysfs:
cmm_unregister_sysfs(&cmm_sysdev);
+out_reboot_notifier:
+ unregister_reboot_notifier(&cmm_reboot_nb);
out_oom_notifier:
unregister_oom_notifier(&cmm_oom_nb);
return rc;
@@ -431,6 +457,7 @@ static void cmm_exit(void)
if (cmm_thread_ptr)
kthread_stop(cmm_thread_ptr);
unregister_oom_notifier(&cmm_oom_nb);
+ unregister_reboot_notifier(&cmm_reboot_nb);
cmm_free_pages(loaned_pages);
cmm_unregister_sysfs(&cmm_sysdev);
}
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 54816d75b578..989d6462c154 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -21,6 +21,8 @@
* Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com>
*/
+#undef DEBUG
+
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
@@ -488,10 +490,8 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
pdn->eeh_mode & EEH_MODE_NOCHECK) {
ignored_check++;
-#ifdef DEBUG
- printk ("EEH:ignored check (%x) for %s %s\n",
- pdn->eeh_mode, pci_name (dev), dn->full_name);
-#endif
+ pr_debug("EEH: Ignored check (%x) for %s %s\n",
+ pdn->eeh_mode, pci_name (dev), dn->full_name);
return 0;
}
@@ -1014,10 +1014,9 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
eeh_subsystem_enabled = 1;
pdn->eeh_mode |= EEH_MODE_SUPPORTED;
-#ifdef DEBUG
- printk(KERN_DEBUG "EEH: %s: eeh enabled, config=%x pe_config=%x\n",
- dn->full_name, pdn->eeh_config_addr, pdn->eeh_pe_config_addr);
-#endif
+ pr_debug("EEH: %s: eeh enabled, config=%x pe_config=%x\n",
+ dn->full_name, pdn->eeh_config_addr,
+ pdn->eeh_pe_config_addr);
} else {
/* This device doesn't support EEH, but it may have an
@@ -1161,13 +1160,17 @@ static void eeh_add_device_late(struct pci_dev *dev)
if (!dev || !eeh_subsystem_enabled)
return;
-#ifdef DEBUG
- printk(KERN_DEBUG "EEH: adding device %s\n", pci_name(dev));
-#endif
+ pr_debug("EEH: Adding device %s\n", pci_name(dev));
- pci_dev_get (dev);
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
+ if (pdn->pcidev == dev) {
+ pr_debug("EEH: Already referenced !\n");
+ return;
+ }
+ WARN_ON(pdn->pcidev);
+
+ pci_dev_get (dev);
pdn->pcidev = dev;
pci_addr_cache_insert_device(dev);
@@ -1206,17 +1209,18 @@ static void eeh_remove_device(struct pci_dev *dev)
return;
/* Unregister the device with the EEH/PCI address search system */
-#ifdef DEBUG
- printk(KERN_DEBUG "EEH: remove device %s\n", pci_name(dev));
-#endif
- pci_addr_cache_remove_device(dev);
- eeh_sysfs_remove_device(dev);
+ pr_debug("EEH: Removing device %s\n", pci_name(dev));
dn = pci_device_to_OF_node(dev);
- if (PCI_DN(dn)->pcidev) {
- PCI_DN(dn)->pcidev = NULL;
- pci_dev_put (dev);
+ if (PCI_DN(dn)->pcidev == NULL) {
+ pr_debug("EEH: Not referenced !\n");
+ return;
}
+ PCI_DN(dn)->pcidev = NULL;
+ pci_dev_put (dev);
+
+ pci_addr_cache_remove_device(dev);
+ eeh_sysfs_remove_device(dev);
}
void eeh_remove_bus_device(struct pci_dev *dev)
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 1f032483c026..a20ead87153d 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -116,7 +116,7 @@ static void pseries_cpu_die(unsigned int cpu)
cpu_status = query_cpu_stopped(pcpu);
if (cpu_status == 0 || cpu_status == -1)
break;
- msleep(200);
+ cpu_relax();
}
if (cpu_status != 0) {
printk("Querying DEAD? cpu %i (%i) shows %i\n",
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 7190493e9bdc..5e1ed3d60ee5 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -25,6 +25,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#undef DEBUG
+
#include <linux/pci.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
@@ -69,74 +71,25 @@ EXPORT_SYMBOL_GPL(pcibios_find_pci_bus);
* Remove all of the PCI devices under this bus both from the
* linux pci device tree, and from the powerpc EEH address cache.
*/
-void
-pcibios_remove_pci_devices(struct pci_bus *bus)
+void pcibios_remove_pci_devices(struct pci_bus *bus)
{
- struct pci_dev *dev, *tmp;
+ struct pci_dev *dev, *tmp;
+ struct pci_bus *child_bus;
+
+ /* First go down child busses */
+ list_for_each_entry(child_bus, &bus->children, node)
+ pcibios_remove_pci_devices(child_bus);
+ pr_debug("PCI: Removing devices on bus %04x:%02x\n",
+ pci_domain_nr(bus), bus->number);
list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
+ pr_debug(" * Removing %s...\n", pci_name(dev));
eeh_remove_bus_device(dev);
- pci_remove_bus_device(dev);
- }
+ pci_remove_bus_device(dev);
+ }
}
EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
-/* Must be called before pci_bus_add_devices */
-void
-pcibios_fixup_new_pci_devices(struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- /* Skip already-added devices */
- if (!dev->is_added) {
- int i;
-
- /* Fill device archdata and setup iommu table */
- pcibios_setup_new_device(dev);
-
- pci_read_irq_line(dev);
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- struct resource *r = &dev->resource[i];
-
- if (r->parent || !r->start || !r->flags)
- continue;
- pci_claim_resource(dev, i);
- }
- }
- }
-}
-EXPORT_SYMBOL_GPL(pcibios_fixup_new_pci_devices);
-
-static int
-pcibios_pci_config_bridge(struct pci_dev *dev)
-{
- u8 sec_busno;
- struct pci_bus *child_bus;
-
- /* Get busno of downstream bus */
- pci_read_config_byte(dev, PCI_SECONDARY_BUS, &sec_busno);
-
- /* Add to children of PCI bridge dev->bus */
- child_bus = pci_add_new_bus(dev->bus, dev, sec_busno);
- if (!child_bus) {
- printk (KERN_ERR "%s: could not add second bus\n", __func__);
- return -EIO;
- }
- sprintf(child_bus->name, "PCI Bus #%02x", child_bus->number);
-
- pci_scan_child_bus(child_bus);
-
- /* Fixup new pci devices */
- pcibios_fixup_new_pci_devices(child_bus);
-
- /* Make the discovered devices available */
- pci_bus_add_devices(child_bus);
-
- eeh_add_device_tree_late(child_bus);
- return 0;
-}
-
/**
* pcibios_add_pci_devices - adds new pci devices to bus
*
@@ -147,10 +100,9 @@ pcibios_pci_config_bridge(struct pci_dev *dev)
* is how this routine differs from other, similar pcibios
* routines.)
*/
-void
-pcibios_add_pci_devices(struct pci_bus * bus)
+void pcibios_add_pci_devices(struct pci_bus * bus)
{
- int slotno, num, mode;
+ int slotno, num, mode, pass, max;
struct pci_dev *dev;
struct device_node *dn = pci_bus_to_OF_node(bus);
@@ -162,26 +114,23 @@ pcibios_add_pci_devices(struct pci_bus * bus)
if (mode == PCI_PROBE_DEVTREE) {
/* use ofdt-based probe */
- of_scan_bus(dn, bus);
- if (!list_empty(&bus->devices)) {
- pcibios_fixup_new_pci_devices(bus);
- pci_bus_add_devices(bus);
- eeh_add_device_tree_late(bus);
- }
+ of_rescan_bus(dn, bus);
} else if (mode == PCI_PROBE_NORMAL) {
/* use legacy probe */
slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
- if (num) {
- pcibios_fixup_new_pci_devices(bus);
- pci_bus_add_devices(bus);
- eeh_add_device_tree_late(bus);
+ if (!num)
+ return;
+ pcibios_setup_bus_devices(bus);
+ max = bus->secondary;
+ for (pass=0; pass < 2; pass++)
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
+ dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+ max = pci_scan_bridge(bus, dev, max, pass);
}
-
- list_for_each_entry(dev, &bus->devices, bus_list)
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
- pcibios_pci_config_bridge(dev);
}
+ pcibios_finish_adding_to_bus(bus);
}
EXPORT_SYMBOL_GPL(pcibios_add_pci_devices);
@@ -190,6 +139,8 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
struct pci_controller *phb;
int primary;
+ pr_debug("PCI: Initializing new hotplug PHB %s\n", dn->full_name);
+
primary = list_empty(&hose_list);
phb = pcibios_alloc_controller(dn);
if (!phb)
@@ -203,11 +154,59 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
eeh_add_device_tree_early(dn);
scan_phb(phb);
- pcibios_allocate_bus_resources(phb->bus);
- pcibios_fixup_new_pci_devices(phb->bus);
- pci_bus_add_devices(phb->bus);
- eeh_add_device_tree_late(phb->bus);
+ pcibios_finish_adding_to_bus(phb->bus);
return phb;
}
EXPORT_SYMBOL_GPL(init_phb_dynamic);
+
+/* RPA-specific bits for removing PHBs */
+int remove_phb_dynamic(struct pci_controller *phb)
+{
+ struct pci_bus *b = phb->bus;
+ struct resource *res;
+ int rc, i;
+
+ pr_debug("PCI: Removing PHB %04x:%02x... \n",
+ pci_domain_nr(b), b->number);
+
+ /* We cannot remove a root bus that has children */
+ if (!(list_empty(&b->children) && list_empty(&b->devices)))
+ return -EBUSY;
+
+ /* We -know- there aren't any child devices anymore at this stage
+ * and thus we can safely unmap the IO space as it's not in use
+ */
+ res = &phb->io_resource;
+ if (res->flags & IORESOURCE_IO) {
+ rc = pcibios_unmap_io_space(b);
+ if (rc) {
+ printk(KERN_ERR "%s: failed to unmap IO on bus %s\n",
+ __func__, b->name);
+ return 1;
+ }
+ }
+
+ /* Unregister the bridge device from sysfs and remove the PCI bus */
+ device_unregister(b->bridge);
+ phb->bus = NULL;
+ pci_remove_bus(b);
+
+ /* Now release the IO resource */
+ if (res->flags & IORESOURCE_IO)
+ release_resource(res);
+
+ /* Release memory resources */
+ for (i = 0; i < 3; ++i) {
+ res = &phb->mem_resources[i];
+ if (!(res->flags & IORESOURCE_MEM))
+ continue;
+ release_resource(res);
+ }
+
+ /* Free pci_controller data structure */
+ pcibios_free_controller(phb);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(remove_phb_dynamic);
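pcibios_finish_adding_to_bus() replaces the four-call sequence removed above. Based only on the calls it displaces in this file, a plausible sketch of what that helper bundles (the exact ordering and any extra fixup steps are assumptions):

static void finish_adding_to_bus_sketch(struct pci_bus *bus)
{
	/* Assign and claim resources for everything found on the bus */
	pcibios_allocate_bus_resources(bus);

	/* Register the new devices with the PCI core, proc and sysfs */
	pci_bus_add_devices(bus);

	/* Hook the subtree into EEH error recovery */
	eeh_add_device_tree_late(bus);
}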
diff --git a/arch/powerpc/platforms/pseries/phyp_dump.c b/arch/powerpc/platforms/pseries/phyp_dump.c
index edbc012c2ebc..6cf35cd8d0b5 100644
--- a/arch/powerpc/platforms/pseries/phyp_dump.c
+++ b/arch/powerpc/platforms/pseries/phyp_dump.c
@@ -130,6 +130,9 @@ static unsigned long init_dump_header(struct phyp_dump_header *ph)
static void print_dump_header(const struct phyp_dump_header *ph)
{
#ifdef DEBUG
+ if (ph == NULL)
+ return;
+
printk(KERN_INFO "dump header:\n");
/* setup some ph->sections required */
printk(KERN_INFO "version = %d\n", ph->version);
@@ -411,6 +414,8 @@ static int __init phyp_dump_setup(void)
of_node_put(rtas);
}
+ ibm_configure_kernel_dump = rtas_token("ibm,configure-kernel-dump");
+
print_dump_header(dump_header);
dump_area_length = init_dump_header(&phdr);
/* align down */
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index e1904774a70f..f7a69021b7bf 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -579,7 +579,7 @@ static void xics_update_irq_servers(void)
int i, j;
struct device_node *np;
u32 ilen;
- const u32 *ireg, *isize;
+ const u32 *ireg;
u32 hcpuid;
/* Find the server numbers for the boot cpu. */
@@ -607,11 +607,6 @@ static void xics_update_irq_servers(void)
}
}
- /* get the bit size of server numbers */
- isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
- if (isize)
- interrupt_server_size = *isize;
-
of_node_put(np);
}
@@ -682,6 +677,7 @@ void __init xics_init_IRQ(void)
struct device_node *np;
u32 indx = 0;
int found = 0;
+ const u32 *isize;
ppc64_boot_msg(0x20, "XICS Init");
@@ -701,6 +697,26 @@ void __init xics_init_IRQ(void)
if (found == 0)
return;
+ /* get the bit size of server numbers */
+ found = 0;
+
+ for_each_compatible_node(np, NULL, "ibm,ppc-xics") {
+ isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
+
+ if (!isize)
+ continue;
+
+ if (!found) {
+ interrupt_server_size = *isize;
+ found = 1;
+ } else if (*isize != interrupt_server_size) {
+ printk(KERN_WARNING "XICS: "
+ "mismatched ibm,interrupt-server#-size\n");
+ interrupt_server_size = max(*isize,
+ interrupt_server_size);
+ }
+ }
+
xics_update_irq_servers();
xics_init_host();
@@ -728,9 +744,18 @@ static void xics_set_cpu_priority(unsigned char cppr)
/* Have the calling processor join or leave the specified global queue */
static void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
{
- int status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE,
- (1UL << interrupt_server_size) - 1 - gserver, join);
- WARN_ON(status < 0);
+ int index;
+ int status;
+
+ if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
+ return;
+
+ index = (1UL << interrupt_server_size) - 1 - gserver;
+
+ status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);
+
+ WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
+ GLOBAL_INTERRUPT_QUEUE, index, join, status);
}
void xics_setup_cpu(void)
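The set_cpu_giq change factors out the indicator index and replaces the silent WARN_ON with a descriptive WARN. A worked example of the index encoding, using illustrative values:

	/* With an 8-bit ibm,interrupt-server#-size and gserver == 0:
	 *   index = (1UL << 8) - 1 - 0 = 255
	 * i.e. the "all ones minus server number" encoding expected by the
	 * RTAS set-indicator call for GLOBAL_INTERRUPT_QUEUE.
	 */
	int index = (1UL << interrupt_server_size) - 1 - gserver;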
diff --git a/arch/powerpc/sysdev/bestcomm/ata.c b/arch/powerpc/sysdev/bestcomm/ata.c
index 1f5258fb38c3..901c9f91e5dd 100644
--- a/arch/powerpc/sysdev/bestcomm/ata.c
+++ b/arch/powerpc/sysdev/bestcomm/ata.c
@@ -61,6 +61,9 @@ bcom_ata_init(int queue_len, int maxbufsize)
struct bcom_ata_var *var;
struct bcom_ata_inc *inc;
+ /* Prefetch breaks ATA DMA, so turn it off before setting up the ATA task */
+ bcom_disable_prefetch();
+
tsk = bcom_task_alloc(queue_len, sizeof(struct bcom_ata_bd), 0);
if (!tsk)
return NULL;
diff --git a/arch/powerpc/sysdev/bestcomm/ata.h b/arch/powerpc/sysdev/bestcomm/ata.h
index 10982769c465..0b2371811334 100644
--- a/arch/powerpc/sysdev/bestcomm/ata.h
+++ b/arch/powerpc/sysdev/bestcomm/ata.h
@@ -16,22 +16,15 @@
struct bcom_ata_bd {
u32 status;
- u32 dst_pa;
u32 src_pa;
+ u32 dst_pa;
};
-extern struct bcom_task *
-bcom_ata_init(int queue_len, int maxbufsize);
-
-extern void
-bcom_ata_rx_prepare(struct bcom_task *tsk);
-
-extern void
-bcom_ata_tx_prepare(struct bcom_task *tsk);
-
-extern void
-bcom_ata_reset_bd(struct bcom_task *tsk);
-
+extern struct bcom_task * bcom_ata_init(int queue_len, int maxbufsize);
+extern void bcom_ata_rx_prepare(struct bcom_task *tsk);
+extern void bcom_ata_tx_prepare(struct bcom_task *tsk);
+extern void bcom_ata_reset_bd(struct bcom_task *tsk);
+extern void bcom_ata_release(struct bcom_task *tsk);
#endif /* __BESTCOMM_ATA_H__ */
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c
index 446c9ea85b30..378ebd9aac18 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.c
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c
@@ -279,7 +279,6 @@ bcom_engine_init(void)
int task;
phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa;
unsigned int tdt_size, ctx_size, var_size, fdt_size;
- u16 regval;
/* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */
tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt);
@@ -331,10 +330,8 @@ bcom_engine_init(void)
out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS);
/* Disable COMM Bus Prefetch on the original 5200; it's broken */
- if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR) {
- regval = in_be16(&bcom_eng->regs->PtdCntrl);
- out_be16(&bcom_eng->regs->PtdCntrl, regval | 1);
- }
+ if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR)
+ bcom_disable_prefetch();
/* Init lock */
spin_lock_init(&bcom_eng->lock);
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.h b/arch/powerpc/sysdev/bestcomm/bestcomm.h
index c960a8b49655..23a95f80dfdb 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm.h
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm.h
@@ -16,8 +16,19 @@
#ifndef __BESTCOMM_H__
#define __BESTCOMM_H__
-struct bcom_bd; /* defined later on ... */
-
+/**
+ * struct bcom_bd - Structure describing a generic BestComm buffer descriptor
+ * @status: The current status of this buffer. Exact meaning depends on the
+ * task type
+ * @data: An array of u32 extra data. The size of the array is task-dependent.
+ *
+ * Note: Don't dereference a bcom_bd pointer as an array. The size of the
+ * bcom_bd is variable. Use bcom_get_bd() instead.
+ */
+struct bcom_bd {
+ u32 status;
+ u32 data[0]; /* variable payload size */
+};
/* ======================================================================== */
/* Generic task management */
@@ -84,17 +95,6 @@ bcom_get_task_irq(struct bcom_task *tsk) {
/* BD based tasks helpers */
/* ======================================================================== */
-/**
- * struct bcom_bd - Structure describing a generic BestComm buffer descriptor
- * @status: The current status of this buffer. Exact meaning depends on the
- * task type
- * @data: An array of u32 whose meaning depends on the task type.
- */
-struct bcom_bd {
- u32 status;
- u32 data[1]; /* variable, but at least 1 */
-};
-
#define BCOM_BD_READY 0x40000000ul
/** _bcom_next_index - Get next input index.
@@ -140,15 +140,31 @@ bcom_queue_full(struct bcom_task *tsk)
}
/**
+ * bcom_get_bd - Get a BD from the queue
+ * @tsk: The BestComm task structure
+ * @index: Index of the BD to fetch
+ */
+static inline struct bcom_bd
+*bcom_get_bd(struct bcom_task *tsk, unsigned int index)
+{
+ /* A cast to (void*) so the address can be incremented by the
+ * real size instead of by sizeof(struct bcom_bd) */
+ return ((void *)tsk->bd) + (index * tsk->bd_size);
+}
+
+/**
* bcom_buffer_done - Checks whether the oldest submitted BestComm buffer has completed
* @tsk: The BestComm task structure
*/
static inline int
bcom_buffer_done(struct bcom_task *tsk)
{
+ struct bcom_bd *bd;
if (bcom_queue_empty(tsk))
return 0;
- return !(tsk->bd[tsk->outdex].status & BCOM_BD_READY);
+
+ bd = bcom_get_bd(tsk, tsk->outdex);
+ return !(bd->status & BCOM_BD_READY);
}
/**
@@ -160,16 +176,21 @@ bcom_buffer_done(struct bcom_task *tsk)
static inline struct bcom_bd *
bcom_prepare_next_buffer(struct bcom_task *tsk)
{
- tsk->bd[tsk->index].status = 0; /* cleanup last status */
- return &tsk->bd[tsk->index];
+ struct bcom_bd *bd;
+
+ bd = bcom_get_bd(tsk, tsk->index);
+ bd->status = 0; /* cleanup last status */
+ return bd;
}
static inline void
bcom_submit_next_buffer(struct bcom_task *tsk, void *cookie)
{
+ struct bcom_bd *bd = bcom_get_bd(tsk, tsk->index);
+
tsk->cookie[tsk->index] = cookie;
mb(); /* ensure the bd is really up-to-date */
- tsk->bd[tsk->index].status |= BCOM_BD_READY;
+ bd->status |= BCOM_BD_READY;
tsk->index = _bcom_next_index(tsk);
if (tsk->flags & BCOM_FLAGS_ENABLE_TASK)
bcom_enable(tsk);
@@ -179,10 +200,12 @@ static inline void *
bcom_retrieve_buffer(struct bcom_task *tsk, u32 *p_status, struct bcom_bd **p_bd)
{
void *cookie = tsk->cookie[tsk->outdex];
+ struct bcom_bd *bd = bcom_get_bd(tsk, tsk->outdex);
+
if (p_status)
- *p_status = tsk->bd[tsk->outdex].status;
+ *p_status = bd->status;
if (p_bd)
- *p_bd = &tsk->bd[tsk->outdex];
+ *p_bd = bd;
tsk->outdex = _bcom_next_outdex(tsk);
return cookie;
}
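With bcom_bd now variable-sized, drivers must never index tsk->bd directly; bcom_get_bd() does the size arithmetic. A usage sketch of the producer/consumer helpers, using the ATA descriptor layout shown earlier (field values and the wrapper function are illustrative):

static void bcom_bd_usage_sketch(struct bcom_task *tsk, u32 dma_len,
				 u32 src_phys, u32 dst_phys, void *cookie)
{
	/* Producer: queue one ATA transfer on the task */
	if (!bcom_queue_full(tsk)) {
		struct bcom_ata_bd *bd;

		bd = (struct bcom_ata_bd *)bcom_prepare_next_buffer(tsk);
		bd->status = dma_len;			/* task-specific field */
		bd->src_pa = src_phys;
		bd->dst_pa = dst_phys;
		bcom_submit_next_buffer(tsk, cookie);	/* sets BCOM_BD_READY */
	}

	/* Consumer: reap the oldest completed buffer, if any */
	if (bcom_buffer_done(tsk)) {
		u32 status;

		cookie = bcom_retrieve_buffer(tsk, &status, NULL);
	}
}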
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
index 866a2915ef2f..eb0d1c883c31 100644
--- a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
+++ b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h
@@ -198,8 +198,8 @@ struct bcom_task_header {
#define BCOM_IPR_SCTMR_1 2
#define BCOM_IPR_FEC_RX 6
#define BCOM_IPR_FEC_TX 5
-#define BCOM_IPR_ATA_RX 4
-#define BCOM_IPR_ATA_TX 3
+#define BCOM_IPR_ATA_RX 7
+#define BCOM_IPR_ATA_TX 7
#define BCOM_IPR_SCPCI_RX 2
#define BCOM_IPR_SCPCI_TX 2
#define BCOM_IPR_PSC3_RX 2
@@ -241,6 +241,22 @@ extern void bcom_set_initiator(int task, int initiator);
#define TASK_ENABLE 0x8000
+/**
+ * bcom_disable_prefetch - Hook to disable bus prefetching
+ *
+ * ATA DMA and the original MPC5200 need this due to silicon bugs. At the
+ * moment disabling prefetch is a one-way street. There is no mechanism
+ * in place to turn prefetch back on after it has been disabled. There is
+ * no reason it couldn't be done; it would just be more complex to implement.
+ */
+static inline void bcom_disable_prefetch(void)
+{
+ u16 regval;
+
+ regval = in_be16(&bcom_eng->regs->PtdCntrl);
+ out_be16(&bcom_eng->regs->PtdCntrl, regval | 1);
+}
+
static inline void
bcom_enable_task(int task)
{
diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S
index 2078f39e2f17..d3098ef1404a 100644
--- a/arch/powerpc/sysdev/dcr-low.S
+++ b/arch/powerpc/sysdev/dcr-low.S
@@ -11,14 +11,20 @@
#include <asm/ppc_asm.h>
#include <asm/processor.h>
+#include <asm/bug.h>
#define DCR_ACCESS_PROLOG(table) \
+ cmpli cr0,r3,1024; \
rlwinm r3,r3,4,18,27; \
lis r5,table@h; \
ori r5,r5,table@l; \
add r3,r3,r5; \
+ bge- 1f; \
mtctr r3; \
- bctr
+ bctr; \
+1: trap; \
+ EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0; \
+ blr
_GLOBAL(__mfdcr)
DCR_ACCESS_PROLOG(__mfdcr_table)
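The added cmpli/trap turns an out-of-range DCR number into a reported bug instead of a branch through garbage. A rough C equivalent of the patched prolog (the stub table name is hypothetical):

static unsigned int mfdcr_sketch(unsigned int dcrn)
{
	/* DCR numbers are 10 bits wide, so the generated stub table has
	 * 1024 entries; anything larger would index past it, hence the
	 * new trap + EMIT_BUG_ENTRY in the assembly.
	 */
	if (dcrn >= 1024)
		BUG();
	return dcr_read_stub[dcrn]();	/* hypothetical per-DCR stub table */
}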
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
index a8ba9983dd5a..bb44aa9fd470 100644
--- a/arch/powerpc/sysdev/dcr.c
+++ b/arch/powerpc/sysdev/dcr.c
@@ -124,7 +124,8 @@ EXPORT_SYMBOL_GPL(dcr_write_generic);
#endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */
-unsigned int dcr_resource_start(struct device_node *np, unsigned int index)
+unsigned int dcr_resource_start(const struct device_node *np,
+ unsigned int index)
{
unsigned int ds;
const u32 *dr = of_get_property(np, "dcr-reg", &ds);
@@ -136,7 +137,7 @@ unsigned int dcr_resource_start(struct device_node *np, unsigned int index)
}
EXPORT_SYMBOL_GPL(dcr_resource_start);
-unsigned int dcr_resource_len(struct device_node *np, unsigned int index)
+unsigned int dcr_resource_len(const struct device_node *np, unsigned int index)
{
unsigned int ds;
const u32 *dr = of_get_property(np, "dcr-reg", &ds);
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 5b264eb4b1f7..d5f9ae0f1b75 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -187,7 +187,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary)
printk(KERN_WARNING "Can't get bus-range for %s, assume"
" bus 0\n", dev->full_name);
- ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
hose = pcibios_alloc_controller(dev);
if (!hose)
return -ENOMEM;
@@ -300,7 +300,7 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
" bus 0\n", dev->full_name);
}
- ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
hose = pcibios_alloc_controller(dev);
if (!hose)
return -ENOMEM;
diff --git a/arch/powerpc/sysdev/grackle.c b/arch/powerpc/sysdev/grackle.c
index d502927644c6..5da37c2f22ee 100644
--- a/arch/powerpc/sysdev/grackle.c
+++ b/arch/powerpc/sysdev/grackle.c
@@ -57,7 +57,7 @@ void __init setup_grackle(struct pci_controller *hose)
{
setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0);
if (machine_is_compatible("PowerMac1,1"))
- ppc_pci_flags |= PPC_PCI_REASSIGN_ALL_BUS;
+ ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
if (machine_is_compatible("AAPL,PowerBook1998"))
grackle_set_loop_snoop(hose, 1);
#if 0 /* Disabled for now, HW problems ??? */
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 1890fb085cde..c82babb70074 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -661,17 +661,6 @@ static inline void mpic_eoi(struct mpic *mpic)
(void)mpic_cpu_read(MPIC_INFO(CPU_WHOAMI));
}
-#ifdef CONFIG_SMP
-static irqreturn_t mpic_ipi_action(int irq, void *data)
-{
- long ipi = (long)data;
-
- smp_message_recv(ipi);
-
- return IRQ_HANDLED;
-}
-#endif /* CONFIG_SMP */
-
/*
* Linux descriptor level callbacks
*/
@@ -1548,13 +1537,7 @@ unsigned int mpic_get_mcirq(void)
void mpic_request_ipis(void)
{
struct mpic *mpic = mpic_primary;
- long i, err;
- static char *ipi_names[] = {
- "IPI0 (call function)",
- "IPI1 (reschedule)",
- "IPI2 (call function single)",
- "IPI3 (debugger break)",
- };
+ int i;
BUG_ON(mpic == NULL);
printk(KERN_INFO "mpic: requesting IPIs ... \n");
@@ -1563,17 +1546,10 @@ void mpic_request_ipis(void)
unsigned int vipi = irq_create_mapping(mpic->irqhost,
mpic->ipi_vecs[0] + i);
if (vipi == NO_IRQ) {
- printk(KERN_ERR "Failed to map IPI %ld\n", i);
- break;
- }
- err = request_irq(vipi, mpic_ipi_action,
- IRQF_DISABLED|IRQF_PERCPU,
- ipi_names[i], (void *)i);
- if (err) {
- printk(KERN_ERR "Request of irq %d for IPI %ld failed\n",
- vipi, i);
- break;
+ printk(KERN_ERR "Failed to map %s\n", smp_ipi_name[i]);
+ continue;
}
+ smp_request_message_ipi(vipi, i);
}
}
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index d3e4d61030b5..77fae5f64f2e 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -194,11 +194,41 @@ static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
* 4xx PCI 2.x part
*/
+static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose,
+ void __iomem *reg,
+ u64 plb_addr,
+ u64 pci_addr,
+ u64 size,
+ unsigned int flags,
+ int index)
+{
+ u32 ma, pcila, pciha;
+
+ if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
+ size < 0x1000 || (plb_addr & (size - 1)) != 0) {
+ printk(KERN_WARNING "%s: Resource out of range\n",
+ hose->dn->full_name);
+ return -1;
+ }
+ ma = (0xffffffffu << ilog2(size)) | 1;
+ if (flags & IORESOURCE_PREFETCH)
+ ma |= 2;
+
+ pciha = RES_TO_U32_HIGH(pci_addr);
+ pcila = RES_TO_U32_LOW(pci_addr);
+
+ writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index));
+ writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index));
+ writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index));
+ writel(ma, reg + PCIL0_PMM0MA + (0x10 * index));
+
+ return 0;
+}
+
static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
void __iomem *reg)
{
- u32 la, ma, pcila, pciha;
- int i, j;
+ int i, j, found_isa_hole = 0;
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
@@ -213,28 +243,29 @@ static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
break;
}
- /* Calculate register values */
- la = res->start;
- pciha = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset);
- pcila = RES_TO_U32_LOW(res->start - hose->pci_mem_offset);
-
- ma = res->end + 1 - res->start;
- if (!is_power_of_2(ma) || ma < 0x1000 || ma > 0xffffffffu) {
- printk(KERN_WARNING "%s: Resource out of range\n",
- hose->dn->full_name);
- continue;
+ /* Configure the resource */
+ if (ppc4xx_setup_one_pci_PMM(hose, reg,
+ res->start,
+ res->start - hose->pci_mem_offset,
+ res->end + 1 - res->start,
+ res->flags,
+ j) == 0) {
+ j++;
+
+ /* If the resource PCI address is 0 then we have our
+ * ISA memory hole
+ */
+ if (res->start == hose->pci_mem_offset)
+ found_isa_hole = 1;
}
- ma = (0xffffffffu << ilog2(ma)) | 0x1;
- if (res->flags & IORESOURCE_PREFETCH)
- ma |= 0x2;
-
- /* Program register values */
- writel(la, reg + PCIL0_PMM0LA + (0x10 * j));
- writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * j));
- writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * j));
- writel(ma, reg + PCIL0_PMM0MA + (0x10 * j));
- j++;
}
+
+ /* Handle ISA memory hole if not already covered */
+ if (j <= 2 && !found_isa_hole && hose->isa_mem_size)
+ if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0,
+ hose->isa_mem_size, 0, j) == 0)
+ printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
+ hose->dn->full_name);
}
static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
@@ -352,11 +383,52 @@ static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
* 4xx PCI-X part
*/
+static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller *hose,
+ void __iomem *reg,
+ u64 plb_addr,
+ u64 pci_addr,
+ u64 size,
+ unsigned int flags,
+ int index)
+{
+ u32 lah, lal, pciah, pcial, sa;
+
+ if (!is_power_of_2(size) || size < 0x1000 ||
+ (plb_addr & (size - 1)) != 0) {
+ printk(KERN_WARNING "%s: Resource out of range\n",
+ hose->dn->full_name);
+ return -1;
+ }
+
+ /* Calculate register values */
+ lah = RES_TO_U32_HIGH(plb_addr);
+ lal = RES_TO_U32_LOW(plb_addr);
+ pciah = RES_TO_U32_HIGH(pci_addr);
+ pcial = RES_TO_U32_LOW(pci_addr);
+ sa = (0xffffffffu << ilog2(size)) | 0x1;
+
+ /* Program register values */
+ if (index == 0) {
+ writel(lah, reg + PCIX0_POM0LAH);
+ writel(lal, reg + PCIX0_POM0LAL);
+ writel(pciah, reg + PCIX0_POM0PCIAH);
+ writel(pcial, reg + PCIX0_POM0PCIAL);
+ writel(sa, reg + PCIX0_POM0SA);
+ } else {
+ writel(lah, reg + PCIX0_POM1LAH);
+ writel(lal, reg + PCIX0_POM1LAL);
+ writel(pciah, reg + PCIX0_POM1PCIAH);
+ writel(pcial, reg + PCIX0_POM1PCIAL);
+ writel(sa, reg + PCIX0_POM1SA);
+ }
+
+ return 0;
+}
+
static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
void __iomem *reg)
{
- u32 lah, lal, pciah, pcial, sa;
- int i, j;
+ int i, j, found_isa_hole = 0;
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
@@ -371,36 +443,29 @@ static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
break;
}
- /* Calculate register values */
- lah = RES_TO_U32_HIGH(res->start);
- lal = RES_TO_U32_LOW(res->start);
- pciah = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset);
- pcial = RES_TO_U32_LOW(res->start - hose->pci_mem_offset);
- sa = res->end + 1 - res->start;
- if (!is_power_of_2(sa) || sa < 0x100000 ||
- sa > 0xffffffffu) {
- printk(KERN_WARNING "%s: Resource out of range\n",
- hose->dn->full_name);
- continue;
+ /* Configure the resource */
+ if (ppc4xx_setup_one_pcix_POM(hose, reg,
+ res->start,
+ res->start - hose->pci_mem_offset,
+ res->end + 1 - res->start,
+ res->flags,
+ j) == 0) {
+ j++;
+
+ /* If the resource PCI address is 0 then we have our
+ * ISA memory hole
+ */
+ if (res->start == hose->pci_mem_offset)
+ found_isa_hole = 1;
}
- sa = (0xffffffffu << ilog2(sa)) | 0x1;
-
- /* Program register values */
- if (j == 0) {
- writel(lah, reg + PCIX0_POM0LAH);
- writel(lal, reg + PCIX0_POM0LAL);
- writel(pciah, reg + PCIX0_POM0PCIAH);
- writel(pcial, reg + PCIX0_POM0PCIAL);
- writel(sa, reg + PCIX0_POM0SA);
- } else {
- writel(lah, reg + PCIX0_POM1LAH);
- writel(lal, reg + PCIX0_POM1LAL);
- writel(pciah, reg + PCIX0_POM1PCIAH);
- writel(pcial, reg + PCIX0_POM1PCIAL);
- writel(sa, reg + PCIX0_POM1SA);
- }
- j++;
}
+
+ /* Handle ISA memory hole if not already covered */
+ if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
+ if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0,
+ hose->isa_mem_size, 0, j) == 0)
+ printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
+ hose->dn->full_name);
}
static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
@@ -1317,12 +1382,72 @@ static struct pci_ops ppc4xx_pciex_pci_ops =
.write = ppc4xx_pciex_write_config,
};
+static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port,
+ struct pci_controller *hose,
+ void __iomem *mbase,
+ u64 plb_addr,
+ u64 pci_addr,
+ u64 size,
+ unsigned int flags,
+ int index)
+{
+ u32 lah, lal, pciah, pcial, sa;
+
+ if (!is_power_of_2(size) ||
+ (index < 2 && size < 0x100000) ||
+ (index == 2 && size < 0x100) ||
+ (plb_addr & (size - 1)) != 0) {
+ printk(KERN_WARNING "%s: Resource out of range\n",
+ hose->dn->full_name);
+ return -1;
+ }
+
+ /* Calculate register values */
+ lah = RES_TO_U32_HIGH(plb_addr);
+ lal = RES_TO_U32_LOW(plb_addr);
+ pciah = RES_TO_U32_HIGH(pci_addr);
+ pcial = RES_TO_U32_LOW(pci_addr);
+ sa = (0xffffffffu << ilog2(size)) | 0x1;
+
+ /* Program register values */
+ switch (index) {
+ case 0:
+ out_le32(mbase + PECFG_POM0LAH, pciah);
+ out_le32(mbase + PECFG_POM0LAL, pcial);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
+ /* Note that 3 here means enabled | single region */
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, sa | 3);
+ break;
+ case 1:
+ out_le32(mbase + PECFG_POM1LAH, pciah);
+ out_le32(mbase + PECFG_POM1LAL, pcial);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
+ /* Note that 3 here means enabled | single region */
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, sa | 3);
+ break;
+ case 2:
+ out_le32(mbase + PECFG_POM2LAH, pciah);
+ out_le32(mbase + PECFG_POM2LAL, pcial);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
+ /* Note that 3 here means enabled | IO space !!! */
+ dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, sa | 3);
+ break;
+ }
+
+ return 0;
+}
+
static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
struct pci_controller *hose,
void __iomem *mbase)
{
- u32 lah, lal, pciah, pcial, sa;
- int i, j;
+ int i, j, found_isa_hole = 0;
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
@@ -1337,53 +1462,38 @@ static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
break;
}
- /* Calculate register values */
- lah = RES_TO_U32_HIGH(res->start);
- lal = RES_TO_U32_LOW(res->start);
- pciah = RES_TO_U32_HIGH(res->start - hose->pci_mem_offset);
- pcial = RES_TO_U32_LOW(res->start - hose->pci_mem_offset);
- sa = res->end + 1 - res->start;
- if (!is_power_of_2(sa) || sa < 0x100000 ||
- sa > 0xffffffffu) {
- printk(KERN_WARNING "%s: Resource out of range\n",
- port->node->full_name);
- continue;
- }
- sa = (0xffffffffu << ilog2(sa)) | 0x1;
-
- /* Program register values */
- switch (j) {
- case 0:
- out_le32(mbase + PECFG_POM0LAH, pciah);
- out_le32(mbase + PECFG_POM0LAL, pcial);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, sa | 3);
- break;
- case 1:
- out_le32(mbase + PECFG_POM1LAH, pciah);
- out_le32(mbase + PECFG_POM1LAL, pcial);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, sa | 3);
- break;
+ /* Configure the resource */
+ if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
+ res->start,
+ res->start - hose->pci_mem_offset,
+ res->end + 1 - res->start,
+ res->flags,
+ j) == 0) {
+ j++;
+
+ /* If the resource PCI address is 0 then we have our
+ * ISA memory hole
+ */
+ if (res->start == hose->pci_mem_offset)
+ found_isa_hole = 1;
}
- j++;
}
- /* Configure IO, always 64K starting at 0 */
- if (hose->io_resource.flags & IORESOURCE_IO) {
- lah = RES_TO_U32_HIGH(hose->io_base_phys);
- lal = RES_TO_U32_LOW(hose->io_base_phys);
- out_le32(mbase + PECFG_POM2LAH, 0);
- out_le32(mbase + PECFG_POM2LAL, 0);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
- dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0xffff0000 | 3);
- }
+ /* Handle ISA memory hole if not already covered */
+ if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
+ if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
+ hose->isa_mem_phys, 0,
+ hose->isa_mem_size, 0, j) == 0)
+ printk(KERN_INFO "%s: Legacy ISA memory support enabled\n",
+ hose->dn->full_name);
+
+ /* Configure IO, always 64K starting at 0. We hard-wire it to 64K!
+ * Note also that it -has- to be region index 2 on this HW
+ */
+ if (hose->io_resource.flags & IORESOURCE_IO)
+ ppc4xx_setup_one_pciex_POM(port, hose, mbase,
+ hose->io_base_phys, 0,
+ 0x10000, IORESOURCE_IO, 2);
}
static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
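Both outbound-window helpers validate the window size and encode it into the mask ("SA") register the same way: the size must be a power of two (at least 1MB for memory windows, 256 bytes for the PCIe IO window), and the programmed value is (0xffffffff << ilog2(size)) | 0x1, where bit 0 enables the window. A user-space sketch of that encoding, assuming sizes below 4GB and using a compiler builtin in place of the kernel's ilog2():

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the size check and mask encoding used by the
 * POM setup helpers; illustration only, not part of the patch. */
static int pom_size_to_mask(uint64_t size, uint32_t *mask)
{
	/* power of two, at least 1MB, as required for a memory window */
	if (size < 0x100000 || (size & (size - 1)) != 0)
		return -1;

	/* high bits select the window size, bit 0 enables it */
	*mask = (0xffffffffu << __builtin_ctzll(size)) | 0x1;
	return 0;
}

int main(void)
{
	uint32_t mask;

	if (pom_size_to_mask(0x10000000, &mask) == 0)	/* 256MB window */
		printf("mask register value: 0x%08x\n", mask);	/* 0xf0000001 */
	return 0;
}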
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index b3b73ae57d6d..01bce3784b0a 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
+#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/bootmem.h>
@@ -38,6 +39,8 @@ static void qe_snums_init(void);
static int qe_sdma_init(void);
static DEFINE_SPINLOCK(qe_lock);
+DEFINE_SPINLOCK(cmxgcr_lock);
+EXPORT_SYMBOL(cmxgcr_lock);
/* QE snum state */
enum qe_snum_state {
diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c
index 1d78071aad7d..ebb442ea1917 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc.c
@@ -18,6 +18,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
+#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/irq.h>
@@ -26,9 +27,6 @@
#include <asm/qe.h>
#include <asm/ucc.h>
-DEFINE_SPINLOCK(cmxgcr_lock);
-EXPORT_SYMBOL(cmxgcr_lock);
-
int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
{
unsigned long flags;
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 51d97588e762..9cb03b71b9d6 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -4,7 +4,7 @@ ifdef CONFIG_PPC64
EXTRA_CFLAGS += -mno-minimal-toc
endif
-obj-y += xmon.o setjmp.o start.o nonstdio.o
+obj-y += xmon.o start.o nonstdio.o
ifdef CONFIG_XMON_DISASSEMBLY
obj-y += ppc-dis.o ppc-opc.o
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 076368c8b8a9..8dfad7d9a004 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -41,6 +41,7 @@
#include <asm/spu_priv1.h>
#include <asm/firmware.h>
#include <asm/setjmp.h>
+#include <asm/reg.h>
#ifdef CONFIG_PPC64
#include <asm/hvcall.h>
@@ -159,8 +160,6 @@ static int xmon_no_auto_backtrace;
extern void xmon_enter(void);
extern void xmon_leave(void);
-extern void xmon_save_regs(struct pt_regs *);
-
#ifdef CONFIG_PPC64
#define REG "%.16lx"
#define REGS_PER_LINE 4
@@ -532,7 +531,7 @@ int xmon(struct pt_regs *excp)
struct pt_regs regs;
if (excp == NULL) {
- xmon_save_regs(&regs);
+ ppc_save_regs(&regs);
excp = &regs;
}
diff --git a/arch/sparc/include/asm/device.h b/arch/sparc/include/asm/device.h
index 19790eb99cc6..3702e087df2c 100644
--- a/arch/sparc/include/asm/device.h
+++ b/arch/sparc/include/asm/device.h
@@ -20,4 +20,16 @@ struct dev_archdata {
int numa_node;
};
+static inline void dev_archdata_set_node(struct dev_archdata *ad,
+ struct device_node *np)
+{
+ ad->prom_node = np;
+}
+
+static inline struct device_node *
+dev_archdata_get_node(const struct dev_archdata *ad)
+{
+ return ad->prom_node;
+}
+
#endif /* _ASM_SPARC_DEVICE_H */
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 421b7c71e72d..1a7be96d627b 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -456,7 +456,8 @@ config PATA_MARVELL
config PATA_MPC52xx
tristate "Freescale MPC52xx SoC internal IDE"
- depends on PPC_MPC52xx
+ depends on PPC_MPC52xx && PPC_BESTCOMM
+ select PPC_BESTCOMM_ATA
help
This option enables support for integrated IDE controller
of the Freescale MPC52xx SoC.
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index a9e827356d06..50ae6d13078a 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -6,6 +6,9 @@
* Copyright (C) 2006 Sylvain Munaut <tnt@246tNt.com>
* Copyright (C) 2003 Mipsys - Benjamin Herrenschmidt
*
+ * UDMA support based on patches by Freescale (Bernard Kuhn, John Rigby),
+ * Domen Puncer and Tim Yamin.
+ *
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
@@ -17,28 +20,46 @@
#include <linux/delay.h>
#include <linux/libata.h>
#include <linux/of_platform.h>
+#include <linux/types.h>
-#include <asm/types.h>
+#include <asm/cacheflush.h>
#include <asm/prom.h>
#include <asm/mpc52xx.h>
+#include <sysdev/bestcomm/bestcomm.h>
+#include <sysdev/bestcomm/bestcomm_priv.h>
+#include <sysdev/bestcomm/ata.h>
#define DRV_NAME "mpc52xx_ata"
-#define DRV_VERSION "0.1.2"
-
/* Private structures used by the driver */
struct mpc52xx_ata_timings {
u32 pio1;
u32 pio2;
+ u32 mdma1;
+ u32 mdma2;
+ u32 udma1;
+ u32 udma2;
+ u32 udma3;
+ u32 udma4;
+ u32 udma5;
+ int using_udma;
};
struct mpc52xx_ata_priv {
unsigned int ipb_period;
- struct mpc52xx_ata __iomem * ata_regs;
+ struct mpc52xx_ata __iomem *ata_regs;
+ phys_addr_t ata_regs_pa;
int ata_irq;
struct mpc52xx_ata_timings timings[2];
int csel;
+
+ /* DMA */
+ struct bcom_task *dmatsk;
+ const struct udmaspec *udmaspec;
+ const struct mdmaspec *mdmaspec;
+ int mpc52xx_ata_dma_last_write;
+ int waiting_for_dma;
};
@@ -53,6 +74,107 @@ static const int ataspec_ta[5] = { 35, 35, 35, 35, 35};
#define CALC_CLKCYC(c,v) ((((v)+(c)-1)/(c)))
+/* ======================================================================== */
+
+/* ATAPI-4 MDMA specs (in clocks) */
+struct mdmaspec {
+ u32 t0M;
+ u32 td;
+ u32 th;
+ u32 tj;
+ u32 tkw;
+ u32 tm;
+ u32 tn;
+};
+
+static const struct mdmaspec mdmaspec66[3] = {
+ { .t0M = 32, .td = 15, .th = 2, .tj = 2, .tkw = 15, .tm = 4, .tn = 1 },
+ { .t0M = 10, .td = 6, .th = 1, .tj = 1, .tkw = 4, .tm = 2, .tn = 1 },
+ { .t0M = 8, .td = 5, .th = 1, .tj = 1, .tkw = 2, .tm = 2, .tn = 1 },
+};
+
+static const struct mdmaspec mdmaspec132[3] = {
+ { .t0M = 64, .td = 29, .th = 3, .tj = 3, .tkw = 29, .tm = 7, .tn = 2 },
+ { .t0M = 20, .td = 11, .th = 2, .tj = 1, .tkw = 7, .tm = 4, .tn = 1 },
+ { .t0M = 16, .td = 10, .th = 2, .tj = 1, .tkw = 4, .tm = 4, .tn = 1 },
+};
+
+/* ATAPI-4 UDMA specs (in clocks) */
+struct udmaspec {
+ u32 tcyc;
+ u32 t2cyc;
+ u32 tds;
+ u32 tdh;
+ u32 tdvs;
+ u32 tdvh;
+ u32 tfs;
+ u32 tli;
+ u32 tmli;
+ u32 taz;
+ u32 tzah;
+ u32 tenv;
+ u32 tsr;
+ u32 trfs;
+ u32 trp;
+ u32 tack;
+ u32 tss;
+};
+
+static const struct udmaspec udmaspec66[6] = {
+ { .tcyc = 8, .t2cyc = 16, .tds = 1, .tdh = 1, .tdvs = 5, .tdvh = 1,
+ .tfs = 16, .tli = 10, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
+ .tsr = 3, .trfs = 5, .trp = 11, .tack = 2, .tss = 4,
+ },
+ { .tcyc = 5, .t2cyc = 11, .tds = 1, .tdh = 1, .tdvs = 4, .tdvh = 1,
+ .tfs = 14, .tli = 10, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
+ .tsr = 2, .trfs = 5, .trp = 9, .tack = 2, .tss = 4,
+ },
+ { .tcyc = 4, .t2cyc = 8, .tds = 1, .tdh = 1, .tdvs = 3, .tdvh = 1,
+ .tfs = 12, .tli = 10, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
+ .tsr = 2, .trfs = 4, .trp = 7, .tack = 2, .tss = 4,
+ },
+ { .tcyc = 3, .t2cyc = 6, .tds = 1, .tdh = 1, .tdvs = 2, .tdvh = 1,
+ .tfs = 9, .tli = 7, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
+ .tsr = 2, .trfs = 4, .trp = 7, .tack = 2, .tss = 4,
+ },
+ { .tcyc = 2, .t2cyc = 4, .tds = 1, .tdh = 1, .tdvs = 1, .tdvh = 1,
+ .tfs = 8, .tli = 8, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
+ .tsr = 2, .trfs = 4, .trp = 7, .tack = 2, .tss = 4,
+ },
+ { .tcyc = 2, .t2cyc = 2, .tds = 1, .tdh = 1, .tdvs = 1, .tdvh = 1,
+ .tfs = 6, .tli = 5, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
+ .tsr = 2, .trfs = 4, .trp = 6, .tack = 2, .tss = 4,
+ },
+};
+
+static const struct udmaspec udmaspec132[6] = {
+ { .tcyc = 15, .t2cyc = 31, .tds = 2, .tdh = 1, .tdvs = 10, .tdvh = 1,
+ .tfs = 30, .tli = 20, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3,
+ .tsr = 7, .trfs = 10, .trp = 22, .tack = 3, .tss = 7,
+ },
+ { .tcyc = 10, .t2cyc = 21, .tds = 2, .tdh = 1, .tdvs = 7, .tdvh = 1,
+ .tfs = 27, .tli = 20, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3,
+ .tsr = 4, .trfs = 10, .trp = 17, .tack = 3, .tss = 7,
+ },
+ { .tcyc = 6, .t2cyc = 12, .tds = 1, .tdh = 1, .tdvs = 5, .tdvh = 1,
+ .tfs = 23, .tli = 20, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3,
+ .tsr = 3, .trfs = 8, .trp = 14, .tack = 3, .tss = 7,
+ },
+ { .tcyc = 7, .t2cyc = 12, .tds = 1, .tdh = 1, .tdvs = 3, .tdvh = 1,
+ .tfs = 15, .tli = 13, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3,
+ .tsr = 3, .trfs = 8, .trp = 14, .tack = 3, .tss = 7,
+ },
+ { .tcyc = 2, .t2cyc = 5, .tds = 0, .tdh = 0, .tdvs = 1, .tdvh = 1,
+ .tfs = 16, .tli = 14, .tmli = 2, .taz = 1, .tzah = 2, .tenv = 2,
+ .tsr = 2, .trfs = 7, .trp = 13, .tack = 2, .tss = 6,
+ },
+ { .tcyc = 3, .t2cyc = 6, .tds = 1, .tdh = 1, .tdvs = 1, .tdvh = 1,
+ .tfs = 12, .tli = 10, .tmli = 3, .taz = 2, .tzah = 3, .tenv = 3,
+ .tsr = 3, .trfs = 7, .trp = 12, .tack = 3, .tss = 7,
+ },
+};
+
+/* ======================================================================== */
/* Bit definitions inside the registers */
#define MPC52xx_ATA_HOSTCONF_SMR 0x80000000UL /* State machine reset */
@@ -66,6 +188,7 @@ static const int ataspec_ta[5] = { 35, 35, 35, 35, 35};
#define MPC52xx_ATA_HOSTSTAT_WERR 0x01000000UL /* Write Error */
#define MPC52xx_ATA_FIFOSTAT_EMPTY 0x01 /* FIFO Empty */
+#define MPC52xx_ATA_FIFOSTAT_ERROR 0x40 /* FIFO Error */
#define MPC52xx_ATA_DMAMODE_WRITE 0x01 /* Write DMA */
#define MPC52xx_ATA_DMAMODE_READ 0x02 /* Read DMA */
@@ -75,6 +198,8 @@ static const int ataspec_ta[5] = { 35, 35, 35, 35, 35};
#define MPC52xx_ATA_DMAMODE_FR 0x20 /* FIFO Reset */
#define MPC52xx_ATA_DMAMODE_HUT 0x40 /* Host UDMA burst terminate */
+#define MAX_DMA_BUFFERS 128
+#define MAX_DMA_BUFFER_SIZE 0x20000u
/* Structure of the hardware registers */
struct mpc52xx_ata {
@@ -140,7 +265,6 @@ struct mpc52xx_ata {
/* MPC52xx low level hw control */
-
static int
mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio)
{
@@ -148,7 +272,7 @@ mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio)
unsigned int ipb_period = priv->ipb_period;
unsigned int t0, t1, t2_8, t2_16, t2i, t4, ta;
- if ((pio<0) || (pio>4))
+ if ((pio < 0) || (pio > 4))
return -EINVAL;
t0 = CALC_CLKCYC(ipb_period, 1000 * ataspec_t0[pio]);
@@ -165,6 +289,43 @@ mpc52xx_ata_compute_pio_timings(struct mpc52xx_ata_priv *priv, int dev, int pio)
return 0;
}
+static int
+mpc52xx_ata_compute_mdma_timings(struct mpc52xx_ata_priv *priv, int dev,
+ int speed)
+{
+ struct mpc52xx_ata_timings *t = &priv->timings[dev];
+ const struct mdmaspec *s = &priv->mdmaspec[speed];
+
+ if (speed < 0 || speed > 2)
+ return -EINVAL;
+
+ t->mdma1 = (s->t0M << 24) | (s->td << 16) | (s->tkw << 8) | (s->tm);
+ t->mdma2 = (s->th << 24) | (s->tj << 16) | (s->tn << 8);
+ t->using_udma = 0;
+
+ return 0;
+}
+
+static int
+mpc52xx_ata_compute_udma_timings(struct mpc52xx_ata_priv *priv, int dev,
+ int speed)
+{
+ struct mpc52xx_ata_timings *t = &priv->timings[dev];
+ const struct udmaspec *s = &priv->udmaspec[speed];
+
+ if (speed < 0 || speed > 2)
+ return -EINVAL;
+
+ t->udma1 = (s->t2cyc << 24) | (s->tcyc << 16) | (s->tds << 8) | s->tdh;
+ t->udma2 = (s->tdvs << 24) | (s->tdvh << 16) | (s->tfs << 8) | s->tli;
+ t->udma3 = (s->tmli << 24) | (s->taz << 16) | (s->tenv << 8) | s->tsr;
+ t->udma4 = (s->tss << 24) | (s->trfs << 16) | (s->trp << 8) | s->tack;
+ t->udma5 = (s->tzah << 24);
+ t->using_udma = 1;
+
+ return 0;
+}
+
static void
mpc52xx_ata_apply_timings(struct mpc52xx_ata_priv *priv, int device)
{
@@ -173,14 +334,13 @@ mpc52xx_ata_apply_timings(struct mpc52xx_ata_priv *priv, int device)
out_be32(&regs->pio1, timing->pio1);
out_be32(&regs->pio2, timing->pio2);
- out_be32(&regs->mdma1, 0);
- out_be32(&regs->mdma2, 0);
- out_be32(&regs->udma1, 0);
- out_be32(&regs->udma2, 0);
- out_be32(&regs->udma3, 0);
- out_be32(&regs->udma4, 0);
- out_be32(&regs->udma5, 0);
-
+ out_be32(&regs->mdma1, timing->mdma1);
+ out_be32(&regs->mdma2, timing->mdma2);
+ out_be32(&regs->udma1, timing->udma1);
+ out_be32(&regs->udma2, timing->udma2);
+ out_be32(&regs->udma3, timing->udma3);
+ out_be32(&regs->udma4, timing->udma4);
+ out_be32(&regs->udma5, timing->udma5);
priv->csel = device;
}
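CALC_CLKCYC() is a ceiling division that turns a duration into a whole number of IPB clock cycles; the probe code further down stores the IPB period as 1000000000 / (ipb_freq / 1000), i.e. in picoseconds. A small stand-alone illustration of the rounding, assuming a 132 MHz IPB bus (the numbers are examples, not taken from the patch):

#include <stdio.h>

#define CALC_CLKCYC(c, v) ((((v) + (c) - 1) / (c)))

int main(void)
{
	/* 132 MHz IPB bus: period of 7575 ps per cycle, computed the same
	 * way as in mpc52xx_ata_probe() */
	unsigned int ipb_period = 1000000000 / (132000000 / 1000);

	/* cycles needed to cover a 1 us time slot (1000000 ps), rounded up */
	unsigned int tslot = CALC_CLKCYC(ipb_period, 1000000);

	printf("ipb_period = %u ps, 1 us time slot = %u cycles\n",
	       ipb_period, tslot);
	return 0;
}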
@@ -208,7 +368,7 @@ mpc52xx_ata_hw_init(struct mpc52xx_ata_priv *priv)
/* Set the time slot to 1us */
tslot = CALC_CLKCYC(priv->ipb_period, 1000000);
- out_be32(&regs->share_cnt, tslot << 16 );
+ out_be32(&regs->share_cnt, tslot << 16);
/* Init timings to PIO0 */
memset(priv->timings, 0x00, 2*sizeof(struct mpc52xx_ata_timings));
@@ -237,13 +397,37 @@ mpc52xx_ata_set_piomode(struct ata_port *ap, struct ata_device *adev)
rv = mpc52xx_ata_compute_pio_timings(priv, adev->devno, pio);
if (rv) {
- printk(KERN_ERR DRV_NAME
- ": Trying to select invalid PIO mode %d\n", pio);
+ dev_err(ap->dev, "error: invalid PIO mode: %d\n", pio);
+ return;
+ }
+
+ mpc52xx_ata_apply_timings(priv, adev->devno);
+}
+
+static void
+mpc52xx_ata_set_dmamode(struct ata_port *ap, struct ata_device *adev)
+{
+ struct mpc52xx_ata_priv *priv = ap->host->private_data;
+ int rv;
+
+ if (adev->dma_mode >= XFER_UDMA_0) {
+ int dma = adev->dma_mode - XFER_UDMA_0;
+ rv = mpc52xx_ata_compute_udma_timings(priv, adev->devno, dma);
+ } else {
+ int dma = adev->dma_mode - XFER_MW_DMA_0;
+ rv = mpc52xx_ata_compute_mdma_timings(priv, adev->devno, dma);
+ }
+
+ if (rv) {
+ dev_alert(ap->dev,
+ "Trying to select invalid DMA mode %d\n",
+ adev->dma_mode);
return;
}
mpc52xx_ata_apply_timings(priv, adev->devno);
}
+
static void
mpc52xx_ata_dev_select(struct ata_port *ap, unsigned int device)
{
@@ -252,7 +436,173 @@ mpc52xx_ata_dev_select(struct ata_port *ap, unsigned int device)
if (device != priv->csel)
mpc52xx_ata_apply_timings(priv, device);
- ata_sff_dev_select(ap,device);
+ ata_sff_dev_select(ap, device);
+}
+
+static int
+mpc52xx_ata_build_dmatable(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct mpc52xx_ata_priv *priv = ap->host->private_data;
+ struct bcom_ata_bd *bd;
+ unsigned int read = !(qc->tf.flags & ATA_TFLAG_WRITE), si;
+ struct scatterlist *sg;
+ int count = 0;
+
+ if (read)
+ bcom_ata_rx_prepare(priv->dmatsk);
+ else
+ bcom_ata_tx_prepare(priv->dmatsk);
+
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ dma_addr_t cur_addr = sg_dma_address(sg);
+ u32 cur_len = sg_dma_len(sg);
+
+ while (cur_len) {
+ unsigned int tc = min(cur_len, MAX_DMA_BUFFER_SIZE);
+ bd = (struct bcom_ata_bd *)
+ bcom_prepare_next_buffer(priv->dmatsk);
+
+ if (read) {
+ bd->status = tc;
+ bd->src_pa = (__force u32) priv->ata_regs_pa +
+ offsetof(struct mpc52xx_ata, fifo_data);
+ bd->dst_pa = (__force u32) cur_addr;
+ } else {
+ bd->status = tc;
+ bd->src_pa = (__force u32) cur_addr;
+ bd->dst_pa = (__force u32) priv->ata_regs_pa +
+ offsetof(struct mpc52xx_ata, fifo_data);
+ }
+
+ bcom_submit_next_buffer(priv->dmatsk, NULL);
+
+ cur_addr += tc;
+ cur_len -= tc;
+ count++;
+
+ if (count > MAX_DMA_BUFFERS) {
+ dev_alert(ap->dev, "dma table "
+ "too small\n");
+ goto use_pio_instead;
+ }
+ }
+ }
+ return 1;
+
+ use_pio_instead:
+ bcom_ata_reset_bd(priv->dmatsk);
+ return 0;
+}
+
+static void
+mpc52xx_bmdma_setup(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct mpc52xx_ata_priv *priv = ap->host->private_data;
+ struct mpc52xx_ata __iomem *regs = priv->ata_regs;
+
+ unsigned int read = !(qc->tf.flags & ATA_TFLAG_WRITE);
+ u8 dma_mode;
+
+ if (!mpc52xx_ata_build_dmatable(qc))
+ dev_alert(ap->dev, "%s: building DMA table failed\n",
+ __func__);
+
+ /* Check FIFO is OK... */
+ if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR)
+ dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n",
+ __func__, in_8(&priv->ata_regs->fifo_status));
+
+ if (read) {
+ dma_mode = MPC52xx_ATA_DMAMODE_IE | MPC52xx_ATA_DMAMODE_READ |
+ MPC52xx_ATA_DMAMODE_FE;
+
+ /* Setup FIFO if direction changed */
+ if (priv->mpc52xx_ata_dma_last_write != 0) {
+ priv->mpc52xx_ata_dma_last_write = 0;
+
+ /* Configure FIFO with granularity to 7 */
+ out_8(&regs->fifo_control, 7);
+ out_be16(&regs->fifo_alarm, 128);
+
+ /* Set FIFO Reset bit (FR) */
+ out_8(&regs->dma_mode, MPC52xx_ATA_DMAMODE_FR);
+ }
+ } else {
+ dma_mode = MPC52xx_ATA_DMAMODE_IE | MPC52xx_ATA_DMAMODE_WRITE;
+
+ /* Setup FIFO if direction changed */
+ if (priv->mpc52xx_ata_dma_last_write != 1) {
+ priv->mpc52xx_ata_dma_last_write = 1;
+
+ /* Configure FIFO with granularity to 4 */
+ out_8(&regs->fifo_control, 4);
+ out_be16(&regs->fifo_alarm, 128);
+ }
+ }
+
+ if (priv->timings[qc->dev->devno].using_udma)
+ dma_mode |= MPC52xx_ATA_DMAMODE_UDMA;
+
+ out_8(&regs->dma_mode, dma_mode);
+ priv->waiting_for_dma = ATA_DMA_ACTIVE;
+
+ ata_wait_idle(ap);
+ ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+static void
+mpc52xx_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct mpc52xx_ata_priv *priv = ap->host->private_data;
+
+ bcom_set_task_auto_start(priv->dmatsk->tasknum, priv->dmatsk->tasknum);
+ bcom_enable(priv->dmatsk);
+}
+
+static void
+mpc52xx_bmdma_stop(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct mpc52xx_ata_priv *priv = ap->host->private_data;
+
+ bcom_disable(priv->dmatsk);
+ bcom_ata_reset_bd(priv->dmatsk);
+ priv->waiting_for_dma = 0;
+
+ /* Check FIFO is OK... */
+ if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR)
+ dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n",
+ __func__, in_8(&priv->ata_regs->fifo_status));
+}
+
+static u8
+mpc52xx_bmdma_status(struct ata_port *ap)
+{
+ struct mpc52xx_ata_priv *priv = ap->host->private_data;
+
+ /* Check FIFO is OK... */
+ if (in_8(&priv->ata_regs->fifo_status) & MPC52xx_ATA_FIFOSTAT_ERROR) {
+ dev_alert(ap->dev, "%s: FIFO error detected: 0x%02x!\n",
+ __func__, in_8(&priv->ata_regs->fifo_status));
+ return priv->waiting_for_dma | ATA_DMA_ERR;
+ }
+
+ return priv->waiting_for_dma;
+}
+
+static irqreturn_t
+mpc52xx_ata_task_irq(int irq, void *vpriv)
+{
+ struct mpc52xx_ata_priv *priv = vpriv;
+ while (bcom_buffer_done(priv->dmatsk))
+ bcom_retrieve_buffer(priv->dmatsk, NULL, NULL);
+
+ priv->waiting_for_dma |= ATA_DMA_INTR;
+
+ return IRQ_HANDLED;
}
static struct scsi_host_template mpc52xx_ata_sht = {
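mpc52xx_ata_build_dmatable() above cuts every scatterlist element into pieces of at most MAX_DMA_BUFFER_SIZE bytes, fills one BestComm buffer descriptor per piece, and falls back to PIO if more than MAX_DMA_BUFFERS descriptors would be needed. The chunking itself can be sketched without the kernel or BestComm types (addresses and lengths below are made up):

#include <stdint.h>
#include <stdio.h>

#define MAX_DMA_BUFFER_SIZE 0x20000u

int main(void)
{
	uint32_t addr = 0x10000000;	/* hypothetical DMA address of one sg element */
	uint32_t len = 0x48000;		/* 288 KiB element */
	int count = 0;

	while (len) {
		uint32_t tc = len < MAX_DMA_BUFFER_SIZE ?
			      len : MAX_DMA_BUFFER_SIZE;

		/* the driver would fill a bcom_ata_bd here */
		printf("bd[%d]: addr 0x%08x, %u bytes\n", count, addr, tc);
		addr += tc;
		len -= tc;
		count++;
	}
	printf("%d buffer descriptors used\n", count);
	return 0;
}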
@@ -262,14 +612,18 @@ static struct scsi_host_template mpc52xx_ata_sht = {
static struct ata_port_operations mpc52xx_ata_port_ops = {
.inherits = &ata_sff_port_ops,
.sff_dev_select = mpc52xx_ata_dev_select,
- .cable_detect = ata_cable_40wire,
.set_piomode = mpc52xx_ata_set_piomode,
- .post_internal_cmd = ATA_OP_NULL,
+ .set_dmamode = mpc52xx_ata_set_dmamode,
+ .bmdma_setup = mpc52xx_bmdma_setup,
+ .bmdma_start = mpc52xx_bmdma_start,
+ .bmdma_stop = mpc52xx_bmdma_stop,
+ .bmdma_status = mpc52xx_bmdma_status,
+ .qc_prep = ata_noop_qc_prep,
};
static int __devinit
mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv,
- unsigned long raw_ata_regs)
+ unsigned long raw_ata_regs, int mwdma_mask, int udma_mask)
{
struct ata_host *host;
struct ata_port *ap;
@@ -281,9 +635,9 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv,
ap = host->ports[0];
ap->flags |= ATA_FLAG_SLAVE_POSS;
- ap->pio_mask = 0x1f; /* Up to PIO4 */
- ap->mwdma_mask = 0x00; /* No MWDMA */
- ap->udma_mask = 0x00; /* No UDMA */
+ ap->pio_mask = ATA_PIO4;
+ ap->mwdma_mask = mwdma_mask;
+ ap->udma_mask = udma_mask;
ap->ops = &mpc52xx_ata_port_ops;
host->private_data = priv;
@@ -330,89 +684,139 @@ mpc52xx_ata_probe(struct of_device *op, const struct of_device_id *match)
{
unsigned int ipb_freq;
struct resource res_mem;
- int ata_irq;
+ int ata_irq = 0;
struct mpc52xx_ata __iomem *ata_regs;
- struct mpc52xx_ata_priv *priv;
- int rv;
+ struct mpc52xx_ata_priv *priv = NULL;
+ int rv, ret, task_irq = 0;
+ int mwdma_mask = 0, udma_mask = 0;
+ const __be32 *prop;
+ int proplen;
+ struct bcom_task *dmatsk = NULL;
/* Get ipb frequency */
ipb_freq = mpc52xx_find_ipb_freq(op->node);
if (!ipb_freq) {
- printk(KERN_ERR DRV_NAME ": "
- "Unable to find IPB Bus frequency\n" );
+ dev_err(&op->dev, "could not determine IPB bus frequency\n");
return -ENODEV;
}
- /* Get IRQ and register */
+ /* Get device base address from device tree, request the region
+ * and ioremap it. */
rv = of_address_to_resource(op->node, 0, &res_mem);
if (rv) {
- printk(KERN_ERR DRV_NAME ": "
- "Error while parsing device node resource\n" );
+ dev_err(&op->dev, "could not determine device base address\n");
return rv;
}
- ata_irq = irq_of_parse_and_map(op->node, 0);
- if (ata_irq == NO_IRQ) {
- printk(KERN_ERR DRV_NAME ": "
- "Error while mapping the irq\n");
- return -EINVAL;
- }
-
- /* Request mem region */
if (!devm_request_mem_region(&op->dev, res_mem.start,
- sizeof(struct mpc52xx_ata), DRV_NAME)) {
- printk(KERN_ERR DRV_NAME ": "
- "Error while requesting mem region\n");
- rv = -EBUSY;
- goto err;
+ sizeof(*ata_regs), DRV_NAME)) {
+ dev_err(&op->dev, "error requesting register region\n");
+ return -EBUSY;
}
- /* Remap registers */
- ata_regs = devm_ioremap(&op->dev, res_mem.start,
- sizeof(struct mpc52xx_ata));
+ ata_regs = devm_ioremap(&op->dev, res_mem.start, sizeof(*ata_regs));
if (!ata_regs) {
- printk(KERN_ERR DRV_NAME ": "
- "Error while mapping register set\n");
+ dev_err(&op->dev, "error mapping device registers\n");
rv = -ENOMEM;
goto err;
}
+ /*
+ * By default, all DMA modes are disabled for the MPC5200. Some
+ * boards don't have the required signals routed to make DMA work.
+ * Also, the MPC5200B has a silicon bug that causes data corruption
+ * with UDMA if it is used at the same time as the LocalPlus bus.
+ *
+ * Instead of trying to guess what modes are usable, check the
+ * ATA device tree node to find out what DMA modes work on the board.
+ * UDMA/MWDMA modes can also be forced by adding "libata.force=<mode>"
+ * to the kernel boot parameters.
+ *
+ * The MPC5200 ATA controller supports MWDMA modes 0, 1 and 2 and
+ * UDMA modes 0, 1 and 2.
+ */
+ prop = of_get_property(op->node, "mwdma-mode", &proplen);
+ if ((prop) && (proplen >= 4))
+ mwdma_mask = 0x7 & ((1 << (*prop + 1)) - 1);
+ prop = of_get_property(op->node, "udma-mode", &proplen);
+ if ((prop) && (proplen >= 4))
+ udma_mask = 0x7 & ((1 << (*prop + 1)) - 1);
+
+ ata_irq = irq_of_parse_and_map(op->node, 0);
+ if (ata_irq == NO_IRQ) {
+ dev_err(&op->dev, "error mapping irq\n");
+ return -EINVAL;
+ }
+
/* Prepare our private structure */
- priv = devm_kzalloc(&op->dev, sizeof(struct mpc52xx_ata_priv),
- GFP_ATOMIC);
+ priv = devm_kzalloc(&op->dev, sizeof(*priv), GFP_ATOMIC);
if (!priv) {
- printk(KERN_ERR DRV_NAME ": "
- "Error while allocating private structure\n");
+ dev_err(&op->dev, "error allocating private structure\n");
rv = -ENOMEM;
goto err;
}
priv->ipb_period = 1000000000 / (ipb_freq / 1000);
priv->ata_regs = ata_regs;
+ priv->ata_regs_pa = res_mem.start;
priv->ata_irq = ata_irq;
priv->csel = -1;
+ priv->mpc52xx_ata_dma_last_write = -1;
+
+ if (ipb_freq/1000000 == 66) {
+ priv->mdmaspec = mdmaspec66;
+ priv->udmaspec = udmaspec66;
+ } else {
+ priv->mdmaspec = mdmaspec132;
+ priv->udmaspec = udmaspec132;
+ }
+
+ /* Allocate a BestComm task for DMA */
+ dmatsk = bcom_ata_init(MAX_DMA_BUFFERS, MAX_DMA_BUFFER_SIZE);
+ if (!dmatsk) {
+ dev_err(&op->dev, "bestcomm initialization failed\n");
+ rv = -ENOMEM;
+ goto err;
+ }
+
+ task_irq = bcom_get_task_irq(dmatsk);
+ ret = request_irq(task_irq, &mpc52xx_ata_task_irq, IRQF_DISABLED,
+ "ATA task", priv);
+ if (ret) {
+ dev_err(&op->dev, "error requesting DMA IRQ\n");
+ goto err;
+ }
+ priv->dmatsk = dmatsk;
/* Init the hw */
rv = mpc52xx_ata_hw_init(priv);
if (rv) {
- printk(KERN_ERR DRV_NAME ": Error during HW init\n");
+ dev_err(&op->dev, "error initializing hardware\n");
goto err;
}
/* Register ourselves to libata */
- rv = mpc52xx_ata_init_one(&op->dev, priv, res_mem.start);
+ rv = mpc52xx_ata_init_one(&op->dev, priv, res_mem.start,
+ mwdma_mask, udma_mask);
if (rv) {
- printk(KERN_ERR DRV_NAME ": "
- "Error while registering to ATA layer\n");
- return rv;
+ dev_err(&op->dev, "error registering with ATA layer\n");
+ goto err;
}
- /* Done */
return 0;
- /* Error path */
-err:
- irq_dispose_mapping(ata_irq);
+ err:
+ devm_release_mem_region(&op->dev, res_mem.start, sizeof(*ata_regs));
+ if (ata_irq)
+ irq_dispose_mapping(ata_irq);
+ if (task_irq)
+ irq_dispose_mapping(task_irq);
+ if (dmatsk)
+ bcom_ata_release(dmatsk);
+ if (ata_regs)
+ devm_iounmap(&op->dev, ata_regs);
+ if (priv)
+ devm_kfree(&op->dev, priv);
return rv;
}
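The probe routine reads the optional mwdma-mode and udma-mode properties and turns the highest requested mode into a libata mode mask with 0x7 & ((1 << (mode + 1)) - 1), so DMA stays off unless the board's device tree opts in. Worked values as a small stand-alone program (the helper name is made up):

#include <stdio.h>

static unsigned int dt_mode_to_mask(unsigned int highest_mode)
{
	/* every mode up to and including the requested one, capped at mode 2 */
	return 0x7 & ((1u << (highest_mode + 1)) - 1);
}

int main(void)
{
	unsigned int m;

	for (m = 0; m <= 2; m++)
		printf("mode property %u -> mask 0x%x\n", m, dt_mode_to_mask(m));
	return 0;
}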
@@ -420,10 +824,23 @@ static int
mpc52xx_ata_remove(struct of_device *op)
{
struct mpc52xx_ata_priv *priv;
+ int task_irq;
+ /* Deregister the ATA interface */
priv = mpc52xx_ata_remove_one(&op->dev);
+
+ /* Clean up DMA */
+ task_irq = bcom_get_task_irq(priv->dmatsk);
+ irq_dispose_mapping(task_irq);
+ bcom_ata_release(priv->dmatsk);
irq_dispose_mapping(priv->ata_irq);
+ /* Clear up IO allocations */
+ devm_iounmap(&op->dev, priv->ata_regs);
+ devm_release_mem_region(&op->dev, priv->ata_regs_pa,
+ sizeof(*priv->ata_regs));
+ devm_kfree(&op->dev, priv);
+
return 0;
}
@@ -447,7 +864,7 @@ mpc52xx_ata_resume(struct of_device *op)
rv = mpc52xx_ata_hw_init(priv);
if (rv) {
- printk(KERN_ERR DRV_NAME ": Error during HW init\n");
+ dev_err(host->dev, "error initializing hardware\n");
return rv;
}
@@ -507,5 +924,4 @@ MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
MODULE_DESCRIPTION("Freescale MPC52xx IDE/ATA libata driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(of, mpc52xx_ata_of_match);
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 8783457b93d3..c602b547cc6e 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -641,6 +641,12 @@ config HVC_XEN
help
Xen virtual console device driver
+config HVC_UDBG
+ bool "udbg based fake hypervisor console"
+ depends on PPC && EXPERIMENTAL
+ select HVC_DRIVER
+ default n
+
config VIRTIO_CONSOLE
tristate "Virtio console"
depends on VIRTIO
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 36151bae0d72..9caf5b5ad1c0 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
obj-$(CONFIG_HVC_XEN) += hvc_xen.o
obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o
+obj-$(CONFIG_HVC_UDBG) += hvc_udbg.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 456f54db73e2..977dfb1096a0 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -60,6 +60,8 @@ struct bsr_dev {
unsigned bsr_num; /* bsr id number for its type */
int bsr_minor;
+ struct list_head bsr_list;
+
dev_t bsr_dev;
struct cdev bsr_cdev;
struct device *bsr_device;
@@ -67,8 +69,8 @@ struct bsr_dev {
};
-static unsigned num_bsr_devs;
-static struct bsr_dev *bsr_devs;
+static unsigned total_bsr_devs;
+static struct list_head bsr_devs = LIST_HEAD_INIT(bsr_devs);
static struct class *bsr_class;
static int bsr_major;
@@ -146,24 +148,25 @@ const static struct file_operations bsr_fops = {
static void bsr_cleanup_devs(void)
{
- int i;
- for (i=0 ; i < num_bsr_devs; i++) {
- struct bsr_dev *cur = bsr_devs + i;
+ struct bsr_dev *cur, *n;
+
+ list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) {
if (cur->bsr_device) {
cdev_del(&cur->bsr_cdev);
device_del(cur->bsr_device);
}
+ list_del(&cur->bsr_list);
+ kfree(cur);
}
-
- kfree(bsr_devs);
}
-static int bsr_create_devs(struct device_node *bn)
+static int bsr_add_node(struct device_node *bn)
{
- int bsr_stride_len, bsr_bytes_len;
+ int bsr_stride_len, bsr_bytes_len, num_bsr_devs;
const u32 *bsr_stride;
const u32 *bsr_bytes;
unsigned i;
+ int ret = -ENODEV;
bsr_stride = of_get_property(bn, "ibm,lock-stride", &bsr_stride_len);
bsr_bytes = of_get_property(bn, "ibm,#lock-bytes", &bsr_bytes_len);
@@ -171,35 +174,36 @@ static int bsr_create_devs(struct device_node *bn)
if (!bsr_stride || !bsr_bytes ||
(bsr_stride_len != bsr_bytes_len)) {
printk(KERN_ERR "bsr of-node has missing/incorrect property\n");
- return -ENODEV;
+ return ret;
}
num_bsr_devs = bsr_bytes_len / sizeof(u32);
- /* only a warning, its informational since we'll fail and exit */
- WARN_ON(num_bsr_devs > BSR_MAX_DEVS);
-
- bsr_devs = kzalloc(sizeof(struct bsr_dev) * num_bsr_devs, GFP_KERNEL);
- if (!bsr_devs)
- return -ENOMEM;
-
for (i = 0 ; i < num_bsr_devs; i++) {
- struct bsr_dev *cur = bsr_devs + i;
+ struct bsr_dev *cur = kzalloc(sizeof(struct bsr_dev),
+ GFP_KERNEL);
struct resource res;
int result;
+ if (!cur) {
+ printk(KERN_ERR "Unable to alloc bsr dev\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
result = of_address_to_resource(bn, i, &res);
if (result < 0) {
- printk(KERN_ERR "bsr of-node has invalid reg property\n");
- goto out_err;
+ printk(KERN_ERR "bsr of-node has invalid reg property, skipping\n");
+ kfree(cur);
+ continue;
}
- cur->bsr_minor = i;
+ cur->bsr_minor = i + total_bsr_devs;
cur->bsr_addr = res.start;
cur->bsr_len = res.end - res.start + 1;
cur->bsr_bytes = bsr_bytes[i];
cur->bsr_stride = bsr_stride[i];
- cur->bsr_dev = MKDEV(bsr_major, i);
+ cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs);
switch(cur->bsr_bytes) {
case 8:
@@ -220,14 +224,15 @@ static int bsr_create_devs(struct device_node *bn)
}
cur->bsr_num = bsr_types[cur->bsr_type];
- bsr_types[cur->bsr_type] = cur->bsr_num + 1;
snprintf(cur->bsr_name, 32, "bsr%d_%d",
cur->bsr_bytes, cur->bsr_num);
cdev_init(&cur->bsr_cdev, &bsr_fops);
result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1);
- if (result)
+ if (result) {
+ kfree(cur);
goto out_err;
+ }
cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev,
cur, cur->bsr_name);
@@ -235,16 +240,37 @@ static int bsr_create_devs(struct device_node *bn)
printk(KERN_ERR "device_create failed for %s\n",
cur->bsr_name);
cdev_del(&cur->bsr_cdev);
+ kfree(cur);
goto out_err;
}
+
+ bsr_types[cur->bsr_type] = cur->bsr_num + 1;
+ list_add_tail(&cur->bsr_list, &bsr_devs);
}
+ total_bsr_devs += num_bsr_devs;
+
return 0;
out_err:
bsr_cleanup_devs();
- return -ENODEV;
+ return ret;
+}
+
+static int bsr_create_devs(struct device_node *bn)
+{
+ int ret;
+
+ while (bn) {
+ ret = bsr_add_node(bn);
+ if (ret) {
+ of_node_put(bn);
+ return ret;
+ }
+ bn = of_find_compatible_node(bn, NULL, "ibm,bsr");
+ }
+ return 0;
}
static int __init bsr_init(void)
@@ -254,7 +280,7 @@ static int __init bsr_init(void)
int ret = -ENODEV;
int result;
- np = of_find_compatible_node(NULL, "ibm,bsr", "ibm,bsr");
+ np = of_find_compatible_node(NULL, NULL, "ibm,bsr");
if (!np)
goto out_err;
@@ -272,10 +298,10 @@ static int __init bsr_init(void)
goto out_err_2;
}
- if ((ret = bsr_create_devs(np)) < 0)
+ if ((ret = bsr_create_devs(np)) < 0) {
+ np = NULL;
goto out_err_3;
-
- of_node_put(np);
+ }
return 0;
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 5b819b12675a..fb57f67bb427 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -642,8 +642,11 @@ int hvc_poll(struct hvc_struct *hp)
/* Handle the SysRq Hack */
/* XXX should support a sequence */
if (buf[i] == '\x0f') { /* ^O */
- sysrq_pressed = 1;
- continue;
+ /* if ^O is pressed again, reset
+ * sysrq_pressed and flip ^O char */
+ sysrq_pressed = !sysrq_pressed;
+ if (sysrq_pressed)
+ continue;
} else if (sysrq_pressed) {
handle_sysrq(buf[i], tty);
sysrq_pressed = 0;
@@ -689,10 +692,8 @@ EXPORT_SYMBOL_GPL(hvc_poll);
*/
void hvc_resize(struct hvc_struct *hp, struct winsize ws)
{
- if ((hp->ws.ws_row != ws.ws_row) || (hp->ws.ws_col != ws.ws_col)) {
- hp->ws = ws;
- schedule_work(&hp->tty_resize);
- }
+ hp->ws = ws;
+ schedule_work(&hp->tty_resize);
}
/*
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index 8297dbc2e6ec..3c85d78c975c 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -48,7 +48,7 @@ struct hvc_struct {
spinlock_t lock;
int index;
struct tty_struct *tty;
- unsigned int count;
+ int count;
int do_wakeup;
char *outbuf;
int outbuf_size;
diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
index b74a2f8ab908..449727b6166d 100644
--- a/drivers/char/hvc_iseries.c
+++ b/drivers/char/hvc_iseries.c
@@ -575,8 +575,10 @@ static int __init hvc_find_vtys(void)
* of console adapters.
*/
if ((num_found >= MAX_NR_HVC_CONSOLES) ||
- (num_found >= VTTY_PORTS))
+ (num_found >= VTTY_PORTS)) {
+ of_node_put(vty);
break;
+ }
vtermno = of_get_property(vty, "reg", NULL);
if (!vtermno)
diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
new file mode 100644
index 000000000000..bd63ba878a56
--- /dev/null
+++ b/drivers/char/hvc_udbg.c
@@ -0,0 +1,96 @@
+/*
+ * udbg interface to hvc_console.c
+ *
+ * (C) Copyright David Gibson, IBM Corporation 2008.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/irq.h>
+
+#include <asm/udbg.h>
+
+#include "hvc_console.h"
+
+struct hvc_struct *hvc_udbg_dev;
+
+static int hvc_udbg_put(uint32_t vtermno, const char *buf, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ udbg_putc(buf[i]);
+
+ return i;
+}
+
+static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
+{
+ int i, c;
+
+ if (!udbg_getc_poll)
+ return 0;
+
+ for (i = 0; i < count; i++) {
+ if ((c = udbg_getc_poll()) == -1)
+ break;
+ buf[i] = c;
+ }
+
+ return i;
+}
+
+static struct hv_ops hvc_udbg_ops = {
+ .get_chars = hvc_udbg_get,
+ .put_chars = hvc_udbg_put,
+};
+
+static int __init hvc_udbg_init(void)
+{
+ struct hvc_struct *hp;
+
+ BUG_ON(hvc_udbg_dev);
+
+ hp = hvc_alloc(0, NO_IRQ, &hvc_udbg_ops, 16);
+ if (IS_ERR(hp))
+ return PTR_ERR(hp);
+
+ hvc_udbg_dev = hp;
+
+ return 0;
+}
+module_init(hvc_udbg_init);
+
+static void __exit hvc_udbg_exit(void)
+{
+ if (hvc_udbg_dev)
+ hvc_remove(hvc_udbg_dev);
+}
+module_exit(hvc_udbg_exit);
+
+static int __init hvc_udbg_console_init(void)
+{
+ hvc_instantiate(0, 0, &hvc_udbg_ops);
+ add_preferred_console("hvc", 0, NULL);
+
+ return 0;
+}
+console_initcall(hvc_udbg_console_init);
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 019e0b58593d..bd62dc86b47d 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -153,8 +153,10 @@ static int hvc_find_vtys(void)
/* We have statically defined space for only a certain number
* of console adapters.
*/
- if (num_found >= MAX_NR_HVC_CONSOLES)
+ if (num_found >= MAX_NR_HVC_CONSOLES) {
+ of_node_put(vty);
break;
+ }
vtermno = of_get_property(vty, "reg", NULL);
if (!vtermno)
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 473d9b14439a..6e6eb445d374 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -269,7 +269,7 @@ struct hvcs_struct {
unsigned int index;
struct tty_struct *tty;
- unsigned int open_count;
+ int open_count;
/*
* Used to tell the driver kernel_thread what operations need to take
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c
index 59c6f9ab94e4..af055287271a 100644
--- a/drivers/char/hvsi.c
+++ b/drivers/char/hvsi.c
@@ -75,7 +75,7 @@ struct hvsi_struct {
spinlock_t lock;
int index;
struct tty_struct *tty;
- unsigned int count;
+ int count;
uint8_t throttle_buf[128];
uint8_t outbuf[N_OUTBUF]; /* to implement write_room and chars_in_buffer */
/* inbuf is for packet reassembly. leave a little room for leftovers. */
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index e0dbd388757f..e2667a8c2997 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -161,7 +161,7 @@ config EDAC_PASEMI
config EDAC_CELL
tristate "Cell Broadband Engine memory controller"
- depends on EDAC_MM_EDAC && PPC_CELL_NATIVE
+ depends on EDAC_MM_EDAC && PPC_CELL_COMMON
help
Support for error detection and correction on the
Cell Broadband Engine internal memory controller
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index d524dc245a2c..b40fb9b6c862 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -1814,7 +1814,7 @@ static int powerbook_sleep_grackle(void)
_set_L2CR(save_l2cr);
/* Restore userland MMU context */
- set_context(current->active_mm->context.id, current->active_mm->pgd);
+ switch_mmu_context(NULL, current->active_mm);
/* Power things up */
pmu_unlock();
@@ -1903,7 +1903,7 @@ powerbook_sleep_Core99(void)
_set_L3CR(save_l3cr);
/* Restore userland MMU context */
- set_context(current->active_mm->context.id, current->active_mm->pgd);
+ switch_mmu_context(NULL, current->active_mm);
/* Tell PMU we are ready */
pmu_unlock();
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 7f2be4baaeda..7847e981ac33 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -87,11 +87,12 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
return NULL;
}
- len = i2c_smbus_read_word_data(&sat->i2c, 9);
- if (len < 0) {
+ err = i2c_smbus_read_word_data(&sat->i2c, 9);
+ if (err < 0) {
printk(KERN_ERR "smu_sat_get_sdb_part rd len error\n");
return NULL;
}
+ len = err;
if (len == 0) {
printk(KERN_ERR "smu_sat_get_sdb_part no partition %x\n", id);
return NULL;
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 45dd9bdc5d62..dd9bfa42ac34 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -122,9 +122,6 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of,
out_be32(&priv->regs->mii_speed,
((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1);
- /* enable MII interrupt */
- out_be32(&priv->regs->imask, in_be32(&priv->regs->imask) | FEC_IMASK_MII);
-
err = mdiobus_register(bus);
if (err)
goto out_unmap;
@@ -156,7 +153,7 @@ static int mpc52xx_fec_mdio_remove(struct of_device *of)
iounmap(priv->regs);
for (i=0; i<PHY_MAX_ADDR; i++)
- if (bus->irq[i])
+ if (bus->irq[i] != PHY_POLL)
irq_dispose_mapping(bus->irq[i]);
kfree(priv);
kfree(bus->irq);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 7c79e94a35ea..cd17092b82bd 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -329,6 +329,41 @@ struct device_node *of_find_compatible_node(struct device_node *from,
EXPORT_SYMBOL(of_find_compatible_node);
/**
+ * of_find_node_with_property - Find a node which has a property with
+ * the given name.
+ * @from: The node to start searching from or NULL, the node
+ * you pass will not be searched, only the next one
+ * will; typically, you pass what the previous call
+ * returned. of_node_put() will be called on it
+ * @prop_name: The name of the property to look for.
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_find_node_with_property(struct device_node *from,
+ const char *prop_name)
+{
+ struct device_node *np;
+ struct property *pp;
+
+ read_lock(&devtree_lock);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext) {
+ for (pp = np->properties; pp != 0; pp = pp->next) {
+ if (of_prop_cmp(pp->name, prop_name) == 0) {
+ of_node_get(np);
+ goto out;
+ }
+ }
+ }
+out:
+ of_node_put(from);
+ read_unlock(&devtree_lock);
+ return np;
+}
+EXPORT_SYMBOL(of_find_node_with_property);
+
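A hypothetical caller of the new helper would iterate like this; the property name is only an example, and the reference counting follows the kerneldoc above (the helper itself puts the node passed in as @from, so nothing is left to release when the loop finishes):

#include <linux/kernel.h>
#include <linux/of.h>

static void example_walk_nodes(void)
{
	struct device_node *np = NULL;

	while ((np = of_find_node_with_property(np, "interrupt-controller")))
		pr_info("node %s has the property\n", np->full_name);
}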
+/**
* of_match_node - Tell if a device_node has a matching of_match structure
* @matches: array of of device match structures to search in
* @node: the of device structure to match against
@@ -464,8 +499,8 @@ EXPORT_SYMBOL_GPL(of_modalias_node);
* @list_name: property name that contains a list
* @cells_name: property name that specifies phandles' arguments count
* @index: index of a phandle to parse out
- * @out_node: pointer to device_node struct pointer (will be filled)
- * @out_args: pointer to arguments pointer (will be filled)
+ * @out_node: optional pointer to device_node struct pointer (will be filled)
+ * @out_args: optional pointer to arguments pointer (will be filled)
*
* This function is useful to parse lists of phandles and their arguments.
* Returns 0 on success and fills out_node and out_args, on error returns
@@ -499,7 +534,7 @@ int of_parse_phandles_with_args(struct device_node *np, const char *list_name,
int size;
int cur_index = 0;
struct device_node *node = NULL;
- const void *args;
+ const void *args = NULL;
list = of_get_property(np, list_name, &size);
if (!list) {
@@ -512,14 +547,12 @@ int of_parse_phandles_with_args(struct device_node *np, const char *list_name,
const u32 *cells;
const phandle *phandle;
- phandle = list;
- args = list + 1;
+ phandle = list++;
+ args = list;
/* one cell hole in the list = <>; */
- if (!*phandle) {
- list++;
+ if (!*phandle)
goto next;
- }
node = of_find_node_by_phandle(*phandle);
if (!node) {
@@ -535,8 +568,7 @@ int of_parse_phandles_with_args(struct device_node *np, const char *list_name,
goto err1;
}
- /* Next phandle is at offset of one phandle cell + #cells */
- list += 1 + *cells;
+ list += *cells;
if (list > list_end) {
pr_debug("%s: insufficient arguments length\n",
np->full_name);
@@ -548,16 +580,26 @@ next:
of_node_put(node);
node = NULL;
+ args = NULL;
cur_index++;
}
if (!node) {
- ret = -ENOENT;
+ /*
+ * args w/o node indicates that the loop above has stopped at
+ * the 'hole' cell. Report this differently.
+ */
+ if (args)
+ ret = -EEXIST;
+ else
+ ret = -ENOENT;
goto err0;
}
- *out_node = node;
- *out_args = args;
+ if (out_node)
+ *out_node = node;
+ if (out_args)
+ *out_args = args;
return 0;
err1:
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c
index 7cd7301b5839..6eea601a9204 100644
--- a/drivers/of/gpio.c
+++ b/drivers/of/gpio.c
@@ -19,14 +19,17 @@
#include <asm/prom.h>
/**
- * of_get_gpio - Get a GPIO number from the device tree to use with GPIO API
+ * of_get_gpio_flags - Get a GPIO number and flags to use with GPIO API
* @np: device node to get GPIO from
* @index: index of the GPIO
+ * @flags: a flags pointer to fill in
*
* Returns GPIO number to use with Linux generic GPIO API, or one of the errno
- * value on the error condition.
+ * values on the error condition. If @flags is not NULL the function also fills
+ * in flags for the GPIO.
*/
-int of_get_gpio(struct device_node *np, int index)
+int of_get_gpio_flags(struct device_node *np, int index,
+ enum of_gpio_flags *flags)
{
int ret;
struct device_node *gc;
@@ -59,7 +62,11 @@ int of_get_gpio(struct device_node *np, int index)
goto err1;
}
- ret = of_gc->xlate(of_gc, np, gpio_spec);
+ /* .xlate might decide to not fill in the flags, so clear it. */
+ if (flags)
+ *flags = 0;
+
+ ret = of_gc->xlate(of_gc, np, gpio_spec, flags);
if (ret < 0)
goto err1;
@@ -70,26 +77,75 @@ err0:
pr_debug("%s exited with status %d\n", __func__, ret);
return ret;
}
-EXPORT_SYMBOL(of_get_gpio);
+EXPORT_SYMBOL(of_get_gpio_flags);
/**
- * of_gpio_simple_xlate - translate gpio_spec to the GPIO number
+ * of_gpio_count - Count GPIOs for a device
+ * @np: device node to count GPIOs for
+ *
+ * The function returns the count of GPIOs specified for a node.
+ *
+ * Note that empty GPIO specifiers count too. For example,
+ *
+ * gpios = <0
+ * &pio1 1 2
+ * 0
+ * &pio2 3 4>;
+ *
+ * defines four GPIOs (so this function will return 4), two of which
+ * are not specified.
+ */
+unsigned int of_gpio_count(struct device_node *np)
+{
+ unsigned int cnt = 0;
+
+ do {
+ int ret;
+
+ ret = of_parse_phandles_with_args(np, "gpios", "#gpio-cells",
+ cnt, NULL, NULL);
+ /* A hole in the gpios = <> counts anyway. */
+ if (ret < 0 && ret != -EEXIST)
+ break;
+ } while (++cnt);
+
+ return cnt;
+}
+EXPORT_SYMBOL(of_gpio_count);
+
+/**
+ * of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags
* @of_gc: pointer to the of_gpio_chip structure
* @np: device node of the GPIO chip
* @gpio_spec: gpio specifier as found in the device tree
+ * @flags: a flags pointer to fill in
*
* This is a simple translation function, suitable for most 1:1 mapped
* gpio chips. This function performs only one sanity check: whether gpio
* is less than ngpios (that is specified in the gpio_chip).
*/
int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np,
- const void *gpio_spec)
+ const void *gpio_spec, enum of_gpio_flags *flags)
{
const u32 *gpio = gpio_spec;
+ /*
+ * We're discouraging gpio_cells < 2, since that way you'll have to
+ * write your own xlate function (that will have to retrieve the GPIO
+ * number and the flags from a single gpio cell -- this is possible,
+ * but not recommended).
+ */
+ if (of_gc->gpio_cells < 2) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
if (*gpio > of_gc->gc.ngpio)
return -EINVAL;
+ if (flags)
+ *flags = gpio[1];
+
return *gpio;
}
EXPORT_SYMBOL(of_gpio_simple_xlate);
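A consumer of the reworked GPIO helpers might combine of_gpio_count() and of_get_gpio_flags() as below; this is a sketch only, with made-up names and error handling trimmed, assuming the node comes from the caller:

#include <linux/kernel.h>
#include <linux/of_gpio.h>

static void example_list_gpios(struct device_node *np)
{
	unsigned int i, count = of_gpio_count(np);

	for (i = 0; i < count; i++) {
		enum of_gpio_flags flags;
		int gpio = of_get_gpio_flags(np, i, &flags);

		if (gpio < 0)
			continue;	/* hole or invalid specifier */
		pr_info("gpio %d, flags 0x%x\n", gpio, flags);
	}
}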
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index 24bbef777c19..e1b0ad6e918f 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -24,6 +24,7 @@ void of_register_i2c_devices(struct i2c_adapter *adap,
for_each_child_of_node(adap_node, node) {
struct i2c_board_info info = {};
+ struct dev_archdata dev_ad = {};
const u32 *addr;
int len;
@@ -41,6 +42,9 @@ void of_register_i2c_devices(struct i2c_adapter *adap,
info.addr = *addr;
+ dev_archdata_set_node(&dev_ad, node);
+ info.archdata = &dev_ad;
+
request_module("%s", info.type);
result = i2c_new_device(adap, &info);
@@ -51,6 +55,13 @@ void of_register_i2c_devices(struct i2c_adapter *adap,
irq_dispose_mapping(info.irq);
continue;
}
+
+ /*
+ * Get the node to not lose the dev_archdata->of_node.
+ * Currently there is no way to put it back, as well as no
+ * of_unregister_i2c_devices() call.
+ */
+ of_node_get(node);
}
}
EXPORT_SYMBOL(of_register_i2c_devices);
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 9c2a22fed18b..4e3e0382c16e 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -14,6 +14,9 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+
+#undef DEBUG
+
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
@@ -151,20 +154,20 @@ static void dlpar_pci_add_bus(struct device_node *dn)
return;
}
+ /* Scan below the new bridge */
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
of_scan_pci_bridge(dn, dev);
- pcibios_fixup_new_pci_devices(dev->subordinate);
-
- /* Claim new bus resources */
- pcibios_claim_one_bus(dev->bus);
-
/* Map IO space for child bus, which may or may not succeed */
pcibios_map_io_space(dev->subordinate);
- /* Add new devices to global lists. Register in proc, sysfs. */
- pci_bus_add_devices(phb->bus);
+ /* Finish adding it: resource allocation, adding devices, etc...
+ * Note that we need to perform the finish pass on the -parent-
+ * bus of the EADS bridge so the bridge device itself gets
+ * properly added
+ */
+ pcibios_finish_adding_to_bus(phb->bus);
}
static int dlpar_add_pci_slot(char *drc_name, struct device_node *dn)
@@ -203,27 +206,6 @@ static int dlpar_add_pci_slot(char *drc_name, struct device_node *dn)
return 0;
}
-static int dlpar_remove_root_bus(struct pci_controller *phb)
-{
- struct pci_bus *phb_bus;
- int rc;
-
- phb_bus = phb->bus;
- if (!(list_empty(&phb_bus->children) &&
- list_empty(&phb_bus->devices))) {
- return -EBUSY;
- }
-
- rc = pcibios_remove_root_bus(phb);
- if (rc)
- return -EIO;
-
- device_unregister(phb_bus->bridge);
- pci_remove_bus(phb_bus);
-
- return 0;
-}
-
static int dlpar_remove_phb(char *drc_name, struct device_node *dn)
{
struct slot *slot;
@@ -235,18 +217,15 @@ static int dlpar_remove_phb(char *drc_name, struct device_node *dn)
/* If pci slot is hotplugable, use hotplug to remove it */
slot = find_php_slot(dn);
- if (slot) {
- if (rpaphp_deregister_slot(slot)) {
- printk(KERN_ERR
- "%s: unable to remove hotplug slot %s\n",
- __func__, drc_name);
- return -EIO;
- }
+ if (slot && rpaphp_deregister_slot(slot)) {
+ printk(KERN_ERR "%s: unable to remove hotplug slot %s\n",
+ __func__, drc_name);
+ return -EIO;
}
pdn = dn->data;
BUG_ON(!pdn || !pdn->phb);
- rc = dlpar_remove_root_bus(pdn->phb);
+ rc = remove_phb_dynamic(pdn->phb);
if (rc < 0)
return rc;
@@ -378,26 +357,38 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
if (!bus)
return -EINVAL;
- /* If pci slot is hotplugable, use hotplug to remove it */
+ pr_debug("PCI: Removing PCI slot below EADS bridge %s\n",
+ bus->self ? pci_name(bus->self) : "<!PHB!>");
+
slot = find_php_slot(dn);
if (slot) {
+ pr_debug("PCI: Removing hotplug slot for %04x:%02x...\n",
+ pci_domain_nr(bus), bus->number);
+
if (rpaphp_deregister_slot(slot)) {
printk(KERN_ERR
"%s: unable to remove hotplug slot %s\n",
__func__, drc_name);
return -EIO;
}
- } else
- pcibios_remove_pci_devices(bus);
+ }
+
+ /* Remove all devices below slot */
+ pcibios_remove_pci_devices(bus);
+ /* Unmap PCI IO space */
if (pcibios_unmap_io_space(bus)) {
printk(KERN_ERR "%s: failed to unmap bus range\n",
__func__);
return -ERANGE;
}
+ /* Remove the EADS bridge device itself */
BUG_ON(!bus->self);
+ pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self));
+ eeh_remove_bus_device(bus->self);
pci_remove_bus_device(bus->self);
+
return 0;
}
diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c
index 06848b254d57..5324978b73fb 100644
--- a/drivers/ps3/ps3av.c
+++ b/drivers/ps3/ps3av.c
@@ -59,8 +59,6 @@ static struct ps3av {
struct ps3av_reply_hdr reply_hdr;
u8 raw[PS3AV_BUF_SIZE];
} recv_buf;
- void (*flip_ctl)(int on, void *data);
- void *flip_data;
} *ps3av;
/* color space */
@@ -939,24 +937,6 @@ int ps3av_audio_mute(int mute)
EXPORT_SYMBOL_GPL(ps3av_audio_mute);
-void ps3av_register_flip_ctl(void (*flip_ctl)(int on, void *data),
- void *flip_data)
-{
- mutex_lock(&ps3av->mutex);
- ps3av->flip_ctl = flip_ctl;
- ps3av->flip_data = flip_data;
- mutex_unlock(&ps3av->mutex);
-}
-EXPORT_SYMBOL_GPL(ps3av_register_flip_ctl);
-
-void ps3av_flip_ctl(int on)
-{
- mutex_lock(&ps3av->mutex);
- if (ps3av->flip_ctl)
- ps3av->flip_ctl(on, ps3av->flip_data);
- mutex_unlock(&ps3av->mutex);
-}
-
static int ps3av_probe(struct ps3_system_bus_device *dev)
{
int res;
diff --git a/drivers/ps3/ps3av_cmd.c b/drivers/ps3/ps3av_cmd.c
index 11eb50318fec..716596e8e5b0 100644
--- a/drivers/ps3/ps3av_cmd.c
+++ b/drivers/ps3/ps3av_cmd.c
@@ -864,7 +864,7 @@ int ps3av_cmd_avb_param(struct ps3av_pkt_avb_param *avb, u32 send_len)
{
int res;
- ps3av_flip_ctl(0); /* flip off */
+ mutex_lock(&ps3_gpu_mutex);
/* avb packet */
res = ps3av_do_pkt(PS3AV_CID_AVB_PARAM, send_len, sizeof(*avb),
@@ -878,7 +878,7 @@ int ps3av_cmd_avb_param(struct ps3av_pkt_avb_param *avb, u32 send_len)
res);
out:
- ps3av_flip_ctl(1); /* flip on */
+ mutex_unlock(&ps3_gpu_mutex);
return res;
}
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 643a6b98462b..5c13f61bfb1b 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -365,15 +365,15 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
rdid++)
rswitch->route_table[rdid] = RIO_INVALID_ROUTE;
rdev->rswitch = rswitch;
- sprintf(rio_name(rdev), "%02x:s:%04x", rdev->net->id,
- rdev->rswitch->switchid);
+ dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
+ rdev->rswitch->switchid);
rio_route_set_ops(rdev);
list_add_tail(&rswitch->node, &rio_switches);
} else
- sprintf(rio_name(rdev), "%02x:e:%04x", rdev->net->id,
- rdev->destid);
+ dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id,
+ rdev->destid);
rdev->dev.bus = &rio_bus_type;
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 28c00c3d58f5..0c3a2ab1612c 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -429,14 +429,24 @@ mpc52xx_uart_tx_empty(struct uart_port *port)
static void
mpc52xx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- /* Not implemented */
+ if (mctrl & TIOCM_RTS)
+ out_8(&PSC(port)->op1, MPC52xx_PSC_OP_RTS);
+ else
+ out_8(&PSC(port)->op0, MPC52xx_PSC_OP_RTS);
}
static unsigned int
mpc52xx_uart_get_mctrl(struct uart_port *port)
{
- /* Not implemented */
- return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+ unsigned int ret = TIOCM_DSR;
+ u8 status = in_8(&PSC(port)->mpc52xx_psc_ipcr);
+
+ if (!(status & MPC52xx_PSC_CTS))
+ ret |= TIOCM_CTS;
+ if (!(status & MPC52xx_PSC_DCD))
+ ret |= TIOCM_CAR;
+
+ return ret;
}
static void
@@ -479,7 +489,15 @@ mpc52xx_uart_stop_rx(struct uart_port *port)
static void
mpc52xx_uart_enable_ms(struct uart_port *port)
{
- /* Not implemented */
+ struct mpc52xx_psc __iomem *psc = PSC(port);
+
+ /* clear D_*-bits by reading them */
+ in_8(&psc->mpc52xx_psc_ipcr);
+ /* enable CTS and DCD as IPC interrupts */
+ out_8(&psc->mpc52xx_psc_acr, MPC52xx_PSC_IEC_CTS | MPC52xx_PSC_IEC_DCD);
+
+ port->read_status_mask |= MPC52xx_PSC_IMR_IPC;
+ out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
}
static void
@@ -580,6 +598,10 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
MPC52xx_PSC_MODE_ONE_STOP_5_BITS :
MPC52xx_PSC_MODE_ONE_STOP;
+ if (new->c_cflag & CRTSCTS) {
+ mr1 |= MPC52xx_PSC_MODE_RXRTS;
+ mr2 |= MPC52xx_PSC_MODE_TXCTS;
+ }
baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16);
quot = uart_get_divisor(port, baud);
@@ -617,6 +639,9 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
out_8(&psc->ctur, ctr >> 8);
out_8(&psc->ctlr, ctr & 0xff);
+ if (UART_ENABLE_MS(port, new->c_cflag))
+ mpc52xx_uart_enable_ms(port);
+
/* Reenable TX & RX */
out_8(&psc->command, MPC52xx_PSC_TX_ENABLE);
out_8(&psc->command, MPC52xx_PSC_RX_ENABLE);
@@ -752,10 +777,15 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
if (status & MPC52xx_PSC_SR_RB) {
flag = TTY_BREAK;
uart_handle_break(port);
- } else if (status & MPC52xx_PSC_SR_PE)
+ port->icount.brk++;
+ } else if (status & MPC52xx_PSC_SR_PE) {
flag = TTY_PARITY;
- else if (status & MPC52xx_PSC_SR_FE)
+ port->icount.parity++;
+ }
+ else if (status & MPC52xx_PSC_SR_FE) {
flag = TTY_FRAME;
+ port->icount.frame++;
+ }
/* Clear error condition */
out_8(&PSC(port)->command, MPC52xx_PSC_RST_ERR_STAT);
@@ -769,6 +799,7 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
* affect the current character
*/
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ port->icount.overrun++;
}
}
@@ -826,6 +857,7 @@ mpc52xx_uart_int(int irq, void *dev_id)
struct uart_port *port = dev_id;
unsigned long pass = ISR_PASS_LIMIT;
unsigned int keepgoing;
+ u8 status;
spin_lock(&port->lock);
@@ -842,6 +874,13 @@ mpc52xx_uart_int(int irq, void *dev_id)
if (psc_ops->tx_rdy(port))
keepgoing |= mpc52xx_uart_int_tx_chars(port);
+ status = in_8(&PSC(port)->mpc52xx_psc_ipcr);
+ if (status & MPC52xx_PSC_D_DCD)
+ uart_handle_dcd_change(port, !(status & MPC52xx_PSC_DCD));
+
+ if (status & MPC52xx_PSC_D_CTS)
+ uart_handle_cts_change(port, !(status & MPC52xx_PSC_CTS));
+
/* Limit number of iteration */
if (!(--pass))
keepgoing = 0;
@@ -1109,22 +1148,29 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
return ret;
port->mapbase = res.start;
+ if (!port->mapbase) {
+ dev_dbg(&op->dev, "Could not allocate resources for PSC\n");
+ return -EINVAL;
+ }
+
port->irq = irq_of_parse_and_map(op->node, 0);
+ if (port->irq == NO_IRQ) {
+ dev_dbg(&op->dev, "Could not get irq\n");
+ return -EINVAL;
+ }
dev_dbg(&op->dev, "mpc52xx-psc uart at %p, irq=%x, freq=%i\n",
(void *)port->mapbase, port->irq, port->uartclk);
- if ((port->irq == NO_IRQ) || !port->mapbase) {
- printk(KERN_ERR "Could not allocate resources for PSC\n");
- return -EINVAL;
- }
-
/* Add the port to the uart sub-system */
ret = uart_add_one_port(&mpc52xx_uart_driver, port);
- if (!ret)
- dev_set_drvdata(&op->dev, (void *)port);
+ if (ret) {
+ irq_dispose_mapping(port->irq);
+ return ret;
+ }
- return ret;
+ dev_set_drvdata(&op->dev, (void *)port);
+ return 0;
}
static int
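
With get_mctrl()/set_mctrl() and enable_ms() now implemented, and the ISR forwarding CTS/DCD deltas through uart_handle_cts_change()/uart_handle_dcd_change(), the standard modem-control ioctls become useful on these ports. A small userspace sketch of the effect; the device node name is assumed and error handling is omitted:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <termios.h>

int main(void)
{
	/* /dev/ttyPSC0 is an assumption; adjust to the registered port */
	int fd = open("/dev/ttyPSC0", O_RDONLY | O_NOCTTY);
	int bits;

	ioctl(fd, TIOCMGET, &bits);	/* serviced by mpc52xx_uart_get_mctrl() */
	printf("CTS=%d DCD=%d\n", !!(bits & TIOCM_CTS), !!(bits & TIOCM_CAR));

	/* sleeps until the ISR reports a carrier-detect change */
	ioctl(fd, TIOCMIWAIT, TIOCM_CAR);
	return 0;
}
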
diff --git a/drivers/serial/pmac_zilog.c b/drivers/serial/pmac_zilog.c
index 317b061f7641..ad3488504010 100644
--- a/drivers/serial/pmac_zilog.c
+++ b/drivers/serial/pmac_zilog.c
@@ -1383,6 +1383,29 @@ static int pmz_verify_port(struct uart_port *port, struct serial_struct *ser)
return -EINVAL;
}
+#ifdef CONFIG_CONSOLE_POLL
+
+static int pmz_poll_get_char(struct uart_port *port)
+{
+ struct uart_pmac_port *uap = (struct uart_pmac_port *)port;
+
+ while ((read_zsreg(uap, R0) & Rx_CH_AV) == 0)
+ udelay(5);
+ return read_zsdata(uap);
+}
+
+static void pmz_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ struct uart_pmac_port *uap = (struct uart_pmac_port *)port;
+
+ /* Wait for the transmit buffer to empty. */
+ while ((read_zsreg(uap, R0) & Tx_BUF_EMP) == 0)
+ udelay(5);
+ write_zsdata(uap, c);
+}
+
+#endif
+
static struct uart_ops pmz_pops = {
.tx_empty = pmz_tx_empty,
.set_mctrl = pmz_set_mctrl,
@@ -1400,6 +1423,10 @@ static struct uart_ops pmz_pops = {
.request_port = pmz_request_port,
.config_port = pmz_config_port,
.verify_port = pmz_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = pmz_poll_get_char,
+ .poll_put_char = pmz_poll_put_char,
+#endif
};
/*
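
The two hooks above give the serial core a polled path into the SCC, which CONFIG_CONSOLE_POLL users such as kgdboc rely on: synchronous, byte-at-a-time I/O that works with interrupts off and the rest of the system stopped. A minimal sketch of a consumer of these uart_ops hooks; the helper name is illustrative:

/* Spin on the transmitter for each byte; no interrupts, no locking,
 * which is exactly the contract pmz_poll_put_char() implements above. */
static void poll_write_string(struct uart_port *port, const char *s)
{
	while (*s)
		port->ops->poll_put_char(port, *s++);
}
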
diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c
index b0be7eac32d8..49fcbe8f18ac 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/controlfb.c
@@ -298,10 +298,10 @@ static int controlfb_mmap(struct fb_info *info,
return -EINVAL;
start = info->fix.mmio_start;
len = PAGE_ALIGN((start & ~PAGE_MASK)+info->fix.mmio_len);
- pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE|_PAGE_GUARDED;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
} else {
/* framebuffer */
- pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
+ vma->vm_page_prot = pgprot_cached_wthru(vma->vm_page_prot);
}
start &= PAGE_MASK;
if ((vma->vm_end - vma->vm_start + off) > len)
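
The controlfb change swaps raw pgprot_val() bit twiddling for the portable pgprot helpers: pgprot_noncached() for the register aperture and the powerpc pgprot_cached_wthru() for the frame buffer. A sketch of the usual shape of such an mmap handler; the function name is illustrative and the offset/length checks from the real handler are omitted:

static int demo_mmio_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	unsigned long start = info->fix.mmio_start;

	/* map the registers uncached/guarded without naming _PAGE_* bits */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start, start >> PAGE_SHIFT,
				  vma->vm_end - vma->vm_start,
				  vma->vm_page_prot);
}
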
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 4b5d80771904..38ac805db97d 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -460,12 +460,16 @@ static void ps3fb_sync_image(struct device *dev, u64 frame_offset,
line_length |= (u64)src_line_length << 32;
src_offset += GPU_FB_START;
+
+ mutex_lock(&ps3_gpu_mutex);
status = lv1_gpu_context_attribute(ps3fb.context_handle,
L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
dst_offset, GPU_IOIF + src_offset,
L1GPU_FB_BLIT_WAIT_FOR_COMPLETION |
(width << 16) | height,
line_length);
+ mutex_unlock(&ps3_gpu_mutex);
+
if (status)
dev_err(dev,
"%s: lv1_gpu_context_attribute FB_BLIT failed: %d\n",
@@ -784,15 +788,6 @@ static int ps3fb_wait_for_vsync(u32 crtc)
return 0;
}
-static void ps3fb_flip_ctl(int on, void *data)
-{
- struct ps3fb_priv *priv = data;
- if (on)
- atomic_dec_if_positive(&priv->ext_flip);
- else
- atomic_inc(&priv->ext_flip);
-}
-
/*
* ioctl
@@ -1228,7 +1223,6 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
}
ps3fb.task = task;
- ps3av_register_flip_ctl(ps3fb_flip_ctl, &ps3fb);
return 0;
@@ -1258,10 +1252,9 @@ static int ps3fb_shutdown(struct ps3_system_bus_device *dev)
dev_dbg(&dev->core, " -> %s:%d\n", __func__, __LINE__);
- ps3fb_flip_ctl(0, &ps3fb); /* flip off */
+ atomic_inc(&ps3fb.ext_flip); /* flip off */
ps3fb.dinfo->irq.mask = 0;
- ps3av_register_flip_ctl(NULL, NULL);
if (ps3fb.task) {
struct task_struct *task = ps3fb.task;
ps3fb.task = NULL;
@@ -1296,8 +1289,8 @@ static int ps3fb_shutdown(struct ps3_system_bus_device *dev)
}
static struct ps3_system_bus_driver ps3fb_driver = {
- .match_id = PS3_MATCH_ID_GRAPHICS,
- .match_sub_id = PS3_MATCH_SUB_ID_FB,
+ .match_id = PS3_MATCH_ID_GPU,
+ .match_sub_id = PS3_MATCH_SUB_ID_GPU_FB,
.core.name = DEVICE_NAME,
.core.owner = THIS_MODULE,
.probe = ps3fb_probe,
@@ -1355,4 +1348,4 @@ module_exit(ps3fb_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PS3 GPU Frame Buffer Driver");
MODULE_AUTHOR("Sony Computer Entertainment Inc.");
-MODULE_ALIAS(PS3_MODULE_ALIAS_GRAPHICS);
+MODULE_ALIAS(PS3_MODULE_ALIAS_GPU_FB);
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index d777789b7a89..de2bba5a3440 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -218,8 +218,7 @@ void proc_device_tree_add_node(struct device_node *np,
void __init proc_device_tree_init(void)
{
struct device_node *root;
- if ( !have_of )
- return;
+
proc_device_tree = proc_mkdir("device-tree", NULL);
if (proc_device_tree == 0)
return;
diff --git a/include/linux/of.h b/include/linux/of.h
index e2488f5e7cb2..6a7efa242f5e 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -57,6 +57,12 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
for (child = of_get_next_child(parent, NULL); child != NULL; \
child = of_get_next_child(parent, child))
+extern struct device_node *of_find_node_with_property(
+ struct device_node *from, const char *prop_name);
+#define for_each_node_with_property(dn, prop_name) \
+ for (dn = of_find_node_with_property(NULL, prop_name); dn; \
+ dn = of_find_node_with_property(dn, prop_name))
+
extern struct property *of_find_property(const struct device_node *np,
const char *name,
int *lenp);
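
The new of_find_node_with_property()/for_each_node_with_property() pair lets callers walk every device-tree node carrying a given property; each returned node holds a reference that the next iteration drops. A small usage sketch; the property name is just an example:

#include <linux/kernel.h>
#include <linux/of.h>

static void __init list_interrupt_controllers(void)
{
	struct device_node *np;

	for_each_node_with_property(np, "interrupt-controller")
		pr_info("%s has an interrupt-controller property\n",
			np->full_name);
}
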
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 67db101d0eb8..fc2472c3c254 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -14,9 +14,22 @@
#ifndef __LINUX_OF_GPIO_H
#define __LINUX_OF_GPIO_H
+#include <linux/compiler.h>
+#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gpio.h>
+struct device_node;
+
+/*
+ * These are Linux-specific flags. By default the controller's and Linux'
+ * mappings match, but GPIO controllers are free to translate their own flags
+ * to Linux-specific ones in their .xlate callback; a 1:1 mapping is recommended.
+ */
+enum of_gpio_flags {
+ OF_GPIO_ACTIVE_LOW = 0x1,
+};
+
#ifdef CONFIG_OF_GPIO
/*
@@ -26,7 +39,7 @@ struct of_gpio_chip {
struct gpio_chip gc;
int gpio_cells;
int (*xlate)(struct of_gpio_chip *of_gc, struct device_node *np,
- const void *gpio_spec);
+ const void *gpio_spec, enum of_gpio_flags *flags);
};
static inline struct of_gpio_chip *to_of_gpio_chip(struct gpio_chip *gc)
@@ -50,20 +63,43 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
return container_of(of_gc, struct of_mm_gpio_chip, of_gc);
}
-extern int of_get_gpio(struct device_node *np, int index);
+extern int of_get_gpio_flags(struct device_node *np, int index,
+ enum of_gpio_flags *flags);
+extern unsigned int of_gpio_count(struct device_node *np);
+
extern int of_mm_gpiochip_add(struct device_node *np,
struct of_mm_gpio_chip *mm_gc);
extern int of_gpio_simple_xlate(struct of_gpio_chip *of_gc,
struct device_node *np,
- const void *gpio_spec);
+ const void *gpio_spec,
+ enum of_gpio_flags *flags);
#else
/* Drivers may not strictly depend on the GPIO support, so let them link. */
-static inline int of_get_gpio(struct device_node *np, int index)
+static inline int of_get_gpio_flags(struct device_node *np, int index,
+ enum of_gpio_flags *flags)
{
return -ENOSYS;
}
+static inline unsigned int of_gpio_count(struct device_node *np)
+{
+ return 0;
+}
+
#endif /* CONFIG_OF_GPIO */
+/**
+ * of_get_gpio - Get a GPIO number to use with GPIO API
+ * @np: device node to get GPIO from
+ * @index: index of the GPIO
+ *
+ * Returns GPIO number to use with Linux generic GPIO API, or one of the
+ * errno values on error.
+ */
+static inline int of_get_gpio(struct device_node *np, int index)
+{
+ return of_get_gpio_flags(np, index, NULL);
+}
+
#endif /* __LINUX_OF_GPIO_H */
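
of_get_gpio() is now a thin wrapper around of_get_gpio_flags(), so existing callers keep working while flag-aware drivers can honour OF_GPIO_ACTIVE_LOW themselves. A sketch of the flag-aware call path; the function name, GPIO label and polarity handling are illustrative:

#include <linux/gpio.h>
#include <linux/of_gpio.h>

static int demo_enable_gpio(struct device_node *np)
{
	enum of_gpio_flags flags;
	int gpio = of_get_gpio_flags(np, 0, &flags);

	if (gpio < 0)
		return gpio;			/* -ENOSYS, -EINVAL, ... */
	if (gpio_request(gpio, "demo-enable"))
		return -EBUSY;

	/* assert the line, respecting an active-low annotation in the DT */
	gpio_direction_output(gpio, !(flags & OF_GPIO_ACTIVE_LOW));
	return gpio;
}
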
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index 90987b7bcc1b..32c0547ffafc 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -427,9 +427,9 @@ void rio_dev_put(struct rio_dev *);
* Get the unique RIO device identifier. Returns the device
* identifier string.
*/
-static inline char *rio_name(struct rio_dev *rdev)
+static inline const char *rio_name(struct rio_dev *rdev)
{
- return rdev->dev.bus_id;
+ return dev_name(&rdev->dev);
}
/**
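
Together with the rio-scan.c hunk earlier, this completes the move to the driver-core naming helpers: names are set with dev_set_name() and read back with dev_name(), rather than sprintf()ing into the old fixed-size bus_id field. A minimal sketch of the pattern; the function name is illustrative:

#include <linux/device.h>

static void demo_name_rio_endpoint(struct device *dev, u8 net, u16 destid)
{
	dev_set_name(dev, "%02x:e:%04x", net, destid);
	pr_debug("registered %s\n", dev_name(dev));
}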